Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 843a9d4..e9160ce 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -1,25 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
select SHMEM
select TMPFS
- select QCOM_SCM
+ select QCOM_SCM if ARCH_QCOM
+ select QCOM_COMMAND_DB if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
- default y
help
DRM/KMS driver for MSM/snapdragon.
+config DRM_MSM_GPU_STATE
+ bool
+ depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+ default y
+
config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 261fa79..1579cf0 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/gpu/drm/msm
-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
+ adreno/a2xx_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -58,7 +59,6 @@
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
- disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
@@ -69,14 +69,13 @@
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
- disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
- disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
+ msm_atomic_tracepoints.o \
msm_debugfs.o \
msm_drv.o \
msm_fb.o \
@@ -91,10 +90,13 @@
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_submitqueue.o
+ msm_submitqueue.o \
+ msm_gpu_tracepoints.o \
+ msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
- disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 4bff0a7..14eb52f 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -239,7 +239,63 @@
enum sq_tex_filter {
SQ_TEX_FILTER_POINT = 0,
SQ_TEX_FILTER_BILINEAR = 1,
- SQ_TEX_FILTER_BICUBIC = 2,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNISIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
};
#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
@@ -323,6 +379,18 @@
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
@@ -331,6 +399,8 @@
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
@@ -389,12 +459,19 @@
#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
#define REG_A2XX_RBBM_INT_ACK 0x000003b6
#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
@@ -467,6 +544,19 @@
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -648,6 +738,18 @@
#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
#define REG_A2XX_RB_COLOR_INFO 0x00002001
#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
@@ -679,7 +781,7 @@
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -693,7 +795,7 @@
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@@ -1757,6 +1859,36 @@
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@
{
return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
-#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
+#define A2XX_SQ_TEX_0_TILED 0x00000002
#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
@@ -1797,8 +1961,20 @@
{
return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@
{
return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@
{
return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 0000000..1f83bc1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+ /* Write Confirm Interval; the CP waits
+ * wait_interval * 16 clocks between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+ return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->gmem)
+ break;
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 0000000..02fba2c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 645a19a..17059f2 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4..5f7e980 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_MSM_OCMEM
@@ -395,19 +384,17 @@
0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
- 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
- 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
- 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
- 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
- 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
- 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
- 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
- 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
- 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
- 0x303c, 0x303c, 0x305e, 0x305f,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
+ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
+ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
+ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
+ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
+ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
+ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
+ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
+ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
+ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
~0 /* sentinel */
};
@@ -481,7 +468,7 @@
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a3xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -528,7 +515,7 @@
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index ab60dc9..5dc33e5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __A3XX_GPU_H__
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 19565e8..9b51e25 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7c4e6dc..ab2b752 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -1,14 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include "a4xx_gpu.h"
#ifdef CONFIG_MSM_OCMEM
@@ -561,7 +552,7 @@
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a4xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -608,7 +599,7 @@
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index f757184..d506311 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -1,14 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __A4XX_GPU_H__
#define __A4XX_GPU_H__
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 182d37f..4a61d4e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,19 +8,19 @@
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+- /home/ubuntu/envytools/envytools/rnndb/./adreno.xml ( 501 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a2xx.xml ( 79608 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_common.xml ( 14239 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/adreno_pm4.xml ( 43155 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a5xx.xml ( 147291 bytes, from 2019-05-29 14:51:41)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx.xml ( 148461 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2019-05-29 01:28:15)
+- /home/ubuntu/envytools/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2019-05-29 01:28:15)
-Copyright (C) 2013-2018 by the following authors:
+Copyright (C) 2013-2019 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -2148,6 +2148,8 @@
#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04
+
#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 059ec7d..075ecce 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -1,19 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
-
#include <linux/types.h>
#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "a5xx_gpu.h"
@@ -130,16 +123,14 @@
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -160,7 +151,6 @@
int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
- struct dentry *ent;
int ret;
if (!minor)
@@ -173,15 +163,12 @@
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
return ret;
}
- ent = debugfs_create_file("reset", S_IWUGO,
- minor->debugfs_root,
- dev, &reset_fops);
- if (!ent)
- return -ENOMEM;
+ debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
+ &reset_fops);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ab1d930..e9c55d1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -31,94 +18,6 @@
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
-{
- struct device *dev = &gpu->pdev->dev;
- const struct firmware *fw;
- struct device_node *np;
- struct resource r;
- phys_addr_t mem_phys;
- ssize_t mem_size;
- void *mem_region = NULL;
- int ret;
-
- if (!IS_ENABLED(CONFIG_ARCH_QCOM))
- return -EINVAL;
-
- np = of_get_child_by_name(dev->of_node, "zap-shader");
- if (!np)
- return -ENODEV;
-
- np = of_parse_phandle(np, "memory-region", 0);
- if (!np)
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- return ret;
-
- mem_phys = r.start;
- mem_size = resource_size(&r);
-
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
- if (IS_ERR(fw)) {
- DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return PTR_ERR(fw);
- }
-
- /* Figure out how much memory we need */
- mem_size = qcom_mdt_get_size(fw);
- if (mem_size < 0) {
- ret = mem_size;
- goto out;
- }
-
- /* Allocate memory for the firmware image */
- mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
- if (!mem_region) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Load the rest of the MDT
- *
- * Note that we could be dealing with two different paths, since
- * with upstream linux-firmware it would be in a qcom/ subdir..
- * adreno_request_fw() handles this, but qcom_mdt_load() does
- * not. But since we've already gotten thru adreno_request_fw()
- * we know which of the two cases it is:
- */
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- } else {
- char *newname;
-
- newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
-
- ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- kfree(newname);
- }
- if (ret)
- goto out;
-
- /* Send the image to the secure world */
- ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
- if (ret)
- DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
-
-out:
- if (mem_region)
- memunmap(mem_region);
-
- release_firmware(fw);
-
- return ret;
-}
-
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -160,6 +59,7 @@
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -250,6 +150,7 @@
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@ -410,12 +311,18 @@
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
gpu_write(gpu, a5xx_hwcg[i].offset,
state ? a5xx_hwcg[i].value : 0);
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
+ }
+
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
@@ -511,13 +418,16 @@
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +437,12 @@
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -559,8 +471,6 @@
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -570,23 +480,9 @@
if (loaded)
return a5xx_zap_shader_resume(gpu);
- /* We need SCM to be able to load the firmware */
- if (!qcom_scm_is_available()) {
- DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
- return -EPROBE_DEFER;
- }
-
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
- ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
-
return ret;
}
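
The SCM-availability and zap-firmware checks dropped above move, together with the MDT loading helper removed earlier in this file, behind the shared adreno_zap_shader_load() call that now takes GPU_PAS_ID. As a rough sketch only (the example_ name is hypothetical and this is not the actual adreno_gpu.c implementation), the consolidated helper is assumed to look something like:

static int example_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct platform_device *pdev = gpu->pdev;

	/* Each GPU has a target specific zap shader firmware name to use */
	if (!adreno_gpu->info->zapfw) {
		DRM_DEV_ERROR(&pdev->dev,
			"Zap shader firmware file not specified for this target\n");
		return -ENODEV;
	}

	/* We need SCM to be able to load the firmware */
	if (!qcom_scm_is_available()) {
		DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
		return -EPROBE_DEFER;
	}

	/* The MDT loader removed above is assumed to live beside this helper */
	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
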
@@ -610,6 +506,9 @@
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
@@ -670,7 +569,10 @@
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
- gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
@@ -695,6 +597,8 @@
/* Set the highest bank bit */
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, 2);
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
@@ -745,6 +649,30 @@
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /*
+ * A VPC corner case with local memory load/kill can corrupt internal
+ * state. The normal disable mechanism does not work on all A5xx chips,
+ * so apply the following register settings to disable it.
+ */
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
+ gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
+ }
+
ret = adreno_hw_init(gpu);
if (ret)
return ret;
@@ -841,20 +769,17 @@
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -1028,7 +953,7 @@
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1059,7 @@
static void a5xx_dump(struct msm_gpu *gpu)
{
- dev_info(gpu->dev->dev, "status: %08x\n",
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -1211,10 +1136,6 @@
u32 *hlsqregs;
};
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
- interval, timeout)
-
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1222,19 +1143,10 @@
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- if (IS_ERR(dumper->ptr))
- return PTR_ERR(dumper->ptr);
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
- return 0;
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
- struct a5xx_crashdumper *dumper)
-{
- msm_gem_put_iova(dumper->bo, gpu->aspace);
- msm_gem_put_vaddr(dumper->bo);
-
- drm_gem_object_unreference(dumper->bo);
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
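
PTR_ERR_OR_ZERO() above is the stock <linux/err.h> helper, so the init path returns 0 on success and the encoded error otherwise; it behaves roughly like this sketch:

static inline int example_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}
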
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1329,7 +1241,7 @@
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
return;
}
@@ -1337,7 +1249,7 @@
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
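
The crashdumper teardown here, and the preemption and GPMU teardown later in this patch, now go through msm_gem_kernel_put() instead of open-coded unpin/vaddr/reference cleanup. A minimal sketch of what such a helper is assumed to bundle, reconstructed from the removed a5xx_crashdumper_free() (illustrative only, not the msm_gem.c code):

static void example_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	/* the 'locked' flag selects the struct_mutex-held put variant */
	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
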
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1436,12 +1348,22 @@
return a5xx_gpu->cur_ring;
}
-static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
- REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+ u64 busy_cycles, busy_time;
- return 0;
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ busy_time = busy_cycles - gpu->devfreq.busy_cycles;
+ do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
}
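
The devfreq hook now reports elapsed busy time instead of a raw counter value: the delta of the RBBM busy counter is divided by the core clock rate in MHz, yielding microseconds of busy time since the previous sample. A small worked sketch of the same arithmetic (the sample numbers are made up):

static unsigned long example_busy_us(u64 prev_cycles, u64 cur_cycles,
		unsigned long core_clk_hz)
{
	u64 busy_time = cur_cycles - prev_cycles;

	/* cycles divided by (cycles per microsecond) gives microseconds */
	do_div(busy_time, core_clk_hz / 1000000);

	return (unsigned long)busy_time;
}

/* e.g. a delta of 355,000,000 cycles at 710 MHz reports 500,000 us busy */
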
static const struct adreno_gpu_funcs funcs = {
@@ -1498,7 +1420,7 @@
int ret;
if (!pdev) {
- dev_err(dev->dev, "No A5XX device is defined\n");
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 7d71860..833468c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -1,14 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __A5XX_GPU_H__
#define __A5XX_GPU_H__
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e9c0e56..a3a06db 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -1,14 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/pm_opp.h>
@@ -32,6 +23,18 @@
#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
static struct {
uint32_t reg;
uint32_t value;
@@ -116,7 +119,7 @@
}
/* Setup thermal limit management */
-static void a5xx_lm_setup(struct msm_gpu *gpu)
+static void a530_lm_setup(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -165,6 +168,45 @@
gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 config;
+
+ /* The battery current limiter isn't enabled for A540 */
+ config = AGC_LM_CONFIG_BCL_DISABLED;
+ config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+ /* For now disable GPMU side throttling */
+ config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+ /* Until we get clock scaling 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ /* Fixed at 6000 for now */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+ PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
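
For reference, the AGC payload offsets used above are byte offsets expressed as dword indices: AGC_LM_CONFIG is 136/4 = dword 34 and AGC_LEVEL_CONFIG is 140/4 = dword 35, so the size written by PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1) works out to (35 + 1) * sizeof(u32) = 144 bytes. An illustrative check (the EXAMPLE_ names are not part of the patch):

#define EXAMPLE_AGC_PAYLOAD_DWORDS	(AGC_LEVEL_CONFIG + 1)			/* 140/4 + 1 == 36 */
#define EXAMPLE_AGC_PAYLOAD_BYTES	PAYLOAD_SIZE(EXAMPLE_AGC_PAYLOAD_DWORDS)	/* 36 * 4 == 144 */
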
/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
@@ -206,7 +248,8 @@
return -EINVAL;
}
- gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
/* Kick off the GPMU */
gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
@@ -220,12 +263,26 @@
DRM_ERROR("%s: GPMU firmware initialization timed out\n",
gpu->name);
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+ gpu->name, val);
+ }
+
return 0;
}
/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* This init sequence only applies to A530 */
+ if (!adreno_is_a530(adreno_gpu))
+ return;
+
gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
@@ -237,10 +294,14 @@
int a5xx_power_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Set up the limits management */
- a5xx_lm_setup(gpu);
+ if (adreno_is_a530(adreno_gpu))
+ a530_lm_setup(gpu);
+ else
+ a540_lm_setup(gpu);
/* Set up SP/TP power collapse */
a5xx_pc_init(gpu);
@@ -298,7 +359,9 @@
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
- goto err;
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
@@ -317,15 +380,4 @@
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
-
- return;
-err:
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- if (a5xx_gpu->gpmu_bo)
- drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
-
- a5xx_gpu->gpmu_bo = NULL;
- a5xx_gpu->gpmu_iova = 0;
- a5xx_gpu->gpmu_dwords = 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 970c796..9cf9353 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -1,14 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include "msm_gem.h"
@@ -92,7 +83,7 @@
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
- dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
@@ -188,7 +179,7 @@
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
- dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
@@ -208,6 +199,13 @@
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
@@ -220,9 +218,6 @@
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
-
- /* Always come up on rb 0 */
- a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
@@ -241,6 +236,8 @@
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ msm_gem_object_set_name(bo, "preempt");
+
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -263,18 +260,8 @@
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++) {
- if (!a5xx_gpu->preempt_bo[i])
- continue;
-
- msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
- if (a5xx_gpu->preempt_iova[i])
- msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
- drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
- a5xx_gpu->preempt_bo[i] = NULL;
- }
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 87eab51..f44553e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -268,8 +268,687 @@
DEPTH6_32 = 4,
};
+enum a6xx_shader_id {
+ A6XX_TP0_TMO_DATA = 9,
+ A6XX_TP0_SMO_DATA = 10,
+ A6XX_TP0_MIPMAP_BASE_DATA = 11,
+ A6XX_TP1_TMO_DATA = 25,
+ A6XX_TP1_SMO_DATA = 26,
+ A6XX_TP1_MIPMAP_BASE_DATA = 27,
+ A6XX_SP_INST_DATA = 41,
+ A6XX_SP_LB_0_DATA = 42,
+ A6XX_SP_LB_1_DATA = 43,
+ A6XX_SP_LB_2_DATA = 44,
+ A6XX_SP_LB_3_DATA = 45,
+ A6XX_SP_LB_4_DATA = 46,
+ A6XX_SP_LB_5_DATA = 47,
+ A6XX_SP_CB_BINDLESS_DATA = 48,
+ A6XX_SP_CB_LEGACY_DATA = 49,
+ A6XX_SP_UAV_DATA = 50,
+ A6XX_SP_INST_TAG = 51,
+ A6XX_SP_CB_BINDLESS_TAG = 52,
+ A6XX_SP_TMO_UMO_TAG = 53,
+ A6XX_SP_SMO_TAG = 54,
+ A6XX_SP_STATE_DATA = 55,
+ A6XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A6XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A6XX_HLSQ_CVS_MISC_RAM = 80,
+ A6XX_HLSQ_CPS_MISC_RAM = 81,
+ A6XX_HLSQ_INST_RAM = 82,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A6XX_HLSQ_INST_RAM_TAG = 87,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A6XX_HLSQ_PWR_REST_RAM = 90,
+ A6XX_HLSQ_PWR_REST_TAG = 91,
+ A6XX_HLSQ_DATAPATH_META = 96,
+ A6XX_HLSQ_FRONTEND_META = 97,
+ A6XX_HLSQ_INDIRECT_META = 98,
+ A6XX_HLSQ_BACKEND_META = 99,
+};
+
+enum a6xx_debugbus_id {
+ A6XX_DBGBUS_CP = 1,
+ A6XX_DBGBUS_RBBM = 2,
+ A6XX_DBGBUS_VBIF = 3,
+ A6XX_DBGBUS_HLSQ = 4,
+ A6XX_DBGBUS_UCHE = 5,
+ A6XX_DBGBUS_DPM = 6,
+ A6XX_DBGBUS_TESS = 7,
+ A6XX_DBGBUS_PC = 8,
+ A6XX_DBGBUS_VFDP = 9,
+ A6XX_DBGBUS_VPC = 10,
+ A6XX_DBGBUS_TSE = 11,
+ A6XX_DBGBUS_RAS = 12,
+ A6XX_DBGBUS_VSC = 13,
+ A6XX_DBGBUS_COM = 14,
+ A6XX_DBGBUS_LRZ = 16,
+ A6XX_DBGBUS_A2D = 17,
+ A6XX_DBGBUS_CCUFCHE = 18,
+ A6XX_DBGBUS_GMU_CX = 19,
+ A6XX_DBGBUS_RBP = 20,
+ A6XX_DBGBUS_DCS = 21,
+ A6XX_DBGBUS_DBGC = 22,
+ A6XX_DBGBUS_CX = 23,
+ A6XX_DBGBUS_GMU_GX = 24,
+ A6XX_DBGBUS_TPFCHE = 25,
+ A6XX_DBGBUS_GBIF_GX = 26,
+ A6XX_DBGBUS_GPC = 29,
+ A6XX_DBGBUS_LARC = 30,
+ A6XX_DBGBUS_HLSQ_SPTP = 31,
+ A6XX_DBGBUS_RB_0 = 32,
+ A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_UCHE_WRAPPER = 36,
+ A6XX_DBGBUS_CCU_0 = 40,
+ A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_VFD_0 = 56,
+ A6XX_DBGBUS_VFD_1 = 57,
+ A6XX_DBGBUS_VFD_2 = 58,
+ A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_SP_0 = 64,
+ A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_TPL1_0 = 72,
+ A6XX_DBGBUS_TPL1_1 = 73,
+ A6XX_DBGBUS_TPL1_2 = 74,
+ A6XX_DBGBUS_TPL1_3 = 75,
+};
+
enum a6xx_cp_perfcounter_select {
PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_NUM_PREEMPTIONS = 3,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 4,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 8,
+ PERF_CP_MODE_SWITCH = 9,
+ PERF_CP_ZPASS_DONE = 10,
+ PERF_CP_CONTEXT_DONE = 11,
+ PERF_CP_CACHE_FLUSH = 12,
+ PERF_CP_LONG_PREEMPTIONS = 13,
+ PERF_CP_SQE_I_CACHE_STARVE = 14,
+ PERF_CP_SQE_IDLE = 15,
+ PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
+ PERF_CP_SQE_PM4_STARVE_SDS = 17,
+ PERF_CP_SQE_MRB_STARVE = 18,
+ PERF_CP_SQE_RRB_STARVE = 19,
+ PERF_CP_SQE_VSD_STARVE = 20,
+ PERF_CP_VSD_DECODE_STARVE = 21,
+ PERF_CP_SQE_PIPE_OUT_STALL = 22,
+ PERF_CP_SQE_SYNC_STALL = 23,
+ PERF_CP_SQE_PM4_WFI_STALL = 24,
+ PERF_CP_SQE_SYS_WFI_STALL = 25,
+ PERF_CP_SQE_T4_EXEC = 26,
+ PERF_CP_SQE_LOAD_STATE_EXEC = 27,
+ PERF_CP_SQE_SAVE_SDS_STATE = 28,
+ PERF_CP_SQE_DRAW_EXEC = 29,
+ PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
+ PERF_CP_SQE_EXEC_PROFILED = 31,
+ PERF_CP_MEMORY_POOL_EMPTY = 32,
+ PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
+ PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
+ PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
+ PERF_CP_AHB_STALL_SQE_GMU = 36,
+ PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
+ PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
+ PERF_CP_CLUSTER0_EMPTY = 39,
+ PERF_CP_CLUSTER1_EMPTY = 40,
+ PERF_CP_CLUSTER2_EMPTY = 41,
+ PERF_CP_CLUSTER3_EMPTY = 42,
+ PERF_CP_CLUSTER4_EMPTY = 43,
+ PERF_CP_CLUSTER5_EMPTY = 44,
+ PERF_CP_PM4_DATA = 45,
+ PERF_CP_PM4_HEADERS = 46,
+ PERF_CP_VBIF_READ_BEATS = 47,
+ PERF_CP_VBIF_WRITE_BEATS = 48,
+ PERF_CP_SQE_INSTR_COUNTER = 49,
+};
+
+enum a6xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a6xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+ PERF_PC_TSE_TRANSACTION = 37,
+ PERF_PC_TSE_VERTEX = 38,
+ PERF_PC_TESS_PC_UV_TRANS = 39,
+ PERF_PC_TESS_PC_UV_PATCHES = 40,
+ PERF_PC_TESS_FACTOR_TRANS = 41,
+};
+
+enum a6xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 3,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
+ PERF_VFD_STARVE_CYCLES_UCHE = 5,
+ PERF_VFD_RBUFFER_FULL = 6,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
+ PERF_VFD_NUM_ATTRIBUTES = 9,
+ PERF_VFD_UPPER_SHADER_FIBERS = 10,
+ PERF_VFD_LOWER_SHADER_FIBERS = 11,
+ PERF_VFD_MODE_0_FIBERS = 12,
+ PERF_VFD_MODE_1_FIBERS = 13,
+ PERF_VFD_MODE_2_FIBERS = 14,
+ PERF_VFD_MODE_3_FIBERS = 15,
+ PERF_VFD_MODE_4_FIBERS = 16,
+ PERF_VFD_TOTAL_VERTICES = 17,
+ PERF_VFDP_STALL_CYCLES_VFD = 18,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
+ PERF_VFDP_STARVE_CYCLES_PC = 21,
+ PERF_VFDP_VS_STAGE_WAVES = 22,
+};
+
+enum a6xx_hlsq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_CS_INVOCATIONS = 9,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
+ PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
+ PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
+ PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
+ PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
+ PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
+ PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
+ PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
+ PERF_HLSQ_STALL_CYCLES_VPC = 18,
+ PERF_HLSQ_PIXELS = 19,
+ PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
+};
+
+enum a6xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_STARVE_CYCLES_SP = 7,
+ PERF_VPC_STARVE_CYCLES_LRZ = 8,
+ PERF_VPC_PC_PRIMITIVES = 9,
+ PERF_VPC_SP_COMPONENTS = 10,
+ PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
+ PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
+ PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
+ PERF_VPC_LM_TRANSACTION = 14,
+ PERF_VPC_STREAMOUT_TRANSACTION = 15,
+ PERF_VPC_VS_BUSY_CYCLES = 16,
+ PERF_VPC_PS_BUSY_CYCLES = 17,
+ PERF_VPC_VS_WORKING_CYCLES = 18,
+ PERF_VPC_PS_WORKING_CYCLES = 19,
+ PERF_VPC_STARVE_CYCLES_RB = 20,
+ PERF_VPC_NUM_VPCRAM_READ_POS = 21,
+ PERF_VPC_WIT_FULL_CYCLES = 22,
+ PERF_VPC_VPCRAM_FULL_CYCLES = 23,
+ PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
+ PERF_VPC_NUM_VPCRAM_WRITE = 25,
+ PERF_VPC_NUM_VPCRAM_READ_SO = 26,
+ PERF_VPC_NUM_ATTR_REQ_LM = 27,
+};
+
+enum a6xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CYCLES = 18,
+ PERF_TSE_CLIP_PLANES = 19,
+};
+
+enum a6xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+ PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
+ PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
+ PERF_RAS_BLOCKS = 12,
+};
+
+enum a6xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_ARBITER = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_TPH_REF_FULL = 30,
+ PERF_UCHE_TPH_VICTIM_FULL = 31,
+ PERF_UCHE_TPH_EXT_FULL = 32,
+ PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
+ PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
+ PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
+ PERF_UCHE_VBIF_READ_BEATS_PC = 36,
+ PERF_UCHE_READ_REQUESTS_PC = 37,
+ PERF_UCHE_RAM_READ_REQ = 38,
+ PERF_UCHE_RAM_WRITE_REQ = 39,
+};
+
+enum a6xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
+ PERF_TP_OUTPUT_PIXELS_POINT = 25,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
+ PERF_TP_OUTPUT_PIXELS_MIP = 27,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 28,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
+ PERF_TP_FLAG_CACHE_REQUESTS = 30,
+ PERF_TP_FLAG_CACHE_MISSES = 31,
+ PERF_TP_L1_5_L2_REQUESTS = 32,
+ PERF_TP_2D_OUTPUT_PIXELS = 33,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
+ PERF_TP_TPA2TPC_TRANS = 38,
+ PERF_TP_L1_MISSES_ASTC_1TILE = 39,
+ PERF_TP_L1_MISSES_ASTC_2TILE = 40,
+ PERF_TP_L1_MISSES_ASTC_4TILE = 41,
+ PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
+ PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
+ PERF_TP_L1_BANK_CONFLICT = 44,
+ PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
+ PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
+ PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
+ PERF_TP_FRONTEND_WORKING_CYCLES = 48,
+ PERF_TP_L1_TAG_WORKING_CYCLES = 49,
+ PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
+ PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
+ PERF_TP_BACKEND_WORKING_CYCLES = 52,
+ PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
+ PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
+ PERF_TP_STARVE_CYCLES_SP = 55,
+ PERF_TP_STARVE_CYCLES_UCHE = 56,
+};
+
+enum a6xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_NON_EXECUTION_CYCLES = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
+ PERF_SP_VS_INSTRUCTIONS = 43,
+ PERF_SP_FS_INSTRUCTIONS = 44,
+ PERF_SP_ADDR_LOCK_COUNT = 45,
+ PERF_SP_UCHE_READ_TRANS = 46,
+ PERF_SP_UCHE_WRITE_TRANS = 47,
+ PERF_SP_EXPORT_VPC_TRANS = 48,
+ PERF_SP_EXPORT_RB_TRANS = 49,
+ PERF_SP_PIXELS_KILLED = 50,
+ PERF_SP_ICL1_REQUESTS = 51,
+ PERF_SP_ICL1_MISSES = 52,
+ PERF_SP_HS_INSTRUCTIONS = 53,
+ PERF_SP_DS_INSTRUCTIONS = 54,
+ PERF_SP_GS_INSTRUCTIONS = 55,
+ PERF_SP_CS_INSTRUCTIONS = 56,
+ PERF_SP_GPR_READ = 57,
+ PERF_SP_GPR_WRITE = 58,
+ PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
+ PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
+ PERF_SP_LM_BANK_CONFLICTS = 61,
+ PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
+ PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
+ PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
+ PERF_SP_LM_WORKING_CYCLES = 65,
+ PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
+ PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
+ PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
+ PERF_SP_STARVE_CYCLES_HLSQ = 69,
+ PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
+ PERF_SP_WORKING_EU = 71,
+ PERF_SP_ANY_EU_WORKING = 72,
+ PERF_SP_WORKING_EU_FS_STAGE = 73,
+ PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
+ PERF_SP_WORKING_EU_VS_STAGE = 75,
+ PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
+ PERF_SP_WORKING_EU_CS_STAGE = 77,
+ PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
+ PERF_SP_GPR_READ_PREFETCH = 79,
+ PERF_SP_GPR_READ_CONFLICT = 80,
+ PERF_SP_GPR_WRITE_CONFLICT = 81,
+ PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
+ PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
+ PERF_SP_EXECUTABLE_WAVES = 84,
+};
+
+enum a6xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_HLSQ = 1,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
+ PERF_RB_STARVE_CYCLES_SP = 5,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
+ PERF_RB_STARVE_CYCLES_CCU = 7,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
+ PERF_RB_Z_WORKLOAD = 10,
+ PERF_RB_HLSQ_ACTIVE = 11,
+ PERF_RB_Z_READ = 12,
+ PERF_RB_Z_WRITE = 13,
+ PERF_RB_C_READ = 14,
+ PERF_RB_C_WRITE = 15,
+ PERF_RB_TOTAL_PASS = 16,
+ PERF_RB_Z_PASS = 17,
+ PERF_RB_Z_FAIL = 18,
+ PERF_RB_S_FAIL = 19,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 20,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 21,
+ PERF_RB_PS_INVOCATIONS = 22,
+ PERF_RB_2D_ALIVE_CYCLES = 23,
+ PERF_RB_2D_STALL_CYCLES_A2D = 24,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 25,
+ PERF_RB_2D_STARVE_CYCLES_SP = 26,
+ PERF_RB_2D_STARVE_CYCLES_DST = 27,
+ PERF_RB_2D_VALID_PIXELS = 28,
+ PERF_RB_3D_PIXELS = 29,
+ PERF_RB_BLENDER_WORKING_CYCLES = 30,
+ PERF_RB_ZPROC_WORKING_CYCLES = 31,
+ PERF_RB_CPROC_WORKING_CYCLES = 32,
+ PERF_RB_SAMPLER_WORKING_CYCLES = 33,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
+ PERF_RB_STALL_CYCLES_VPC = 38,
+ PERF_RB_2D_INPUT_TRANS = 39,
+ PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
+ PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
+ PERF_RB_BLENDED_FP32_COMPONENTS = 42,
+ PERF_RB_COLOR_PIX_TILES = 43,
+ PERF_RB_STALL_CYCLES_CCU = 44,
+ PERF_RB_EARLY_Z_ARB3_GRANT = 45,
+ PERF_RB_LATE_Z_ARB3_GRANT = 46,
+ PERF_RB_EARLY_Z_SKIP_GRANT = 47,
+};
+
+enum a6xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+ PERF_VSC_INPUT_TILES = 4,
+};
+
+enum a6xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
+ PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
+ PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
+ PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
+ PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
+ PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
+ PERF_CCU_2D_RD_REQ = 27,
+ PERF_CCU_2D_WR_REQ = 28,
+};
+
+enum a6xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+ PERF_LRZ_FULLY_COVERED_TILES = 19,
+ PERF_LRZ_PARTIAL_COVERED_TILES = 20,
+ PERF_LRZ_FEEDBACK_ACCEPT = 21,
+ PERF_LRZ_FEEDBACK_DISCARD = 22,
+ PERF_LRZ_FEEDBACK_STALL = 23,
+ PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
+ PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
+ PERF_LRZ_STALL_CYCLES_VC = 26,
+ PERF_LRZ_RAS_MASK_TRANS = 27,
+};
+
+enum a6xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+ PERF_CMPDECMP_2D_RD_DATA = 28,
+ PERF_CMPDECMP_2D_WR_DATA = 29,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+ PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+ PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+ PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+ PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+ PERF_CMPDECMP_2D_PIXELS = 39,
};
enum a6xx_tex_filter {
@@ -1765,12 +2444,39 @@
#define REG_A6XX_VBIF_VERSION 0x00003000
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -1813,313 +2519,79 @@
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
}
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
}
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000
+
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
-
-#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
-
-#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
-
-#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
-
-#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
-
-#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
-
-#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
-
-#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
-
-#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
-#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
-#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
-#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_X1_BIN_SIZE 0x000080a1
-#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
-{
- return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
-}
-#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
-{
- return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A6XX_X2_BIN_SIZE 0x00008800
-#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
-{
- return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
-}
-#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
-{
- return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
-}
-
-#define REG_A6XX_X3_BIN_SIZE 0x000088d3
-#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
-{
- return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
-}
-#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
-{
- return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
}
#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
@@ -2182,11 +2654,19 @@
return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO 0x00000c30
-#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI 0x00000c31
-#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+#define REG_A6XX_VSC_PIPE_DATA2_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH 0x00000c33
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK;
+}
#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
@@ -2194,18 +2674,29 @@
#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+#define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH 0x00000c37
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK;
+}
+
static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+#define REG_A6XX_GRAS_UNKNOWN_8000 0x00008000
+
#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
#define REG_A6XX_GRAS_CNTL 0x00008005
#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_UNK3 0x00000008
#define A6XX_GRAS_CNTL_XCOORD 0x00000040
#define A6XX_GRAS_CNTL_YCOORD 0x00000080
#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
@@ -2308,6 +2799,9 @@
return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
}
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
@@ -2344,6 +2838,8 @@
#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+#define REG_A6XX_GRAS_UNKNOWN_80A0 0x000080a0
+
#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2463,6 +2959,10 @@
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
+
+#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -2494,7 +2994,18 @@
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+#define REG_A6XX_GRAS_UNKNOWN_8109 0x00008109
+
+#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
@@ -2590,6 +3101,33 @@
#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+#define REG_A6XX_RB_BIN_CONTROL 0x00008800
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_RB_BIN_CONTROL_USE_VIZ 0x00200000
+
+#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_UNK4 0x00000010
+#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+
#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
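
The RB_BIN_CONTROL helpers added in this hunk follow the usual rnndb pattern: each field gets a __MASK/__SHIFT pair plus an inline packer, and the packer also applies the hardware scaling (bin width stored in 32-pixel units via "val >> 5", height in 16-pixel units via "val >> 4"), so callers pass raw pixel sizes. A minimal usage sketch; pack_rb_bin_control() is a hypothetical helper, not part of this patch:

  /* Hypothetical helper: build an RB_BIN_CONTROL value from a bin size
   * given in pixels.  The generated packers do the unit scaling, so no
   * extra shifting is needed here.
   */
  static u32 pack_rb_bin_control(u32 bin_w, u32 bin_h, bool binning_pass)
  {
          u32 val = A6XX_RB_BIN_CONTROL_BINW(bin_w) |
                    A6XX_RB_BIN_CONTROL_BINH(bin_h);

          if (binning_pass)
                  val |= A6XX_RB_BIN_CONTROL_BINNING_PASS;

          return val;
  }
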
@@ -2615,6 +3153,7 @@
#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_UNK3 0x00000008
#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
@@ -2747,6 +3286,10 @@
#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+#define REG_A6XX_RB_UNKNOWN_8810 0x00008810
+
+#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+
#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
@@ -2837,7 +3380,6 @@
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
-#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
@@ -2916,6 +3458,7 @@
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -2923,6 +3466,9 @@
return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
}
+#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
+#define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
@@ -3053,6 +3599,12 @@
{
return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
}
+#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
+#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
+}
#define REG_A6XX_RB_STENCILMASK 0x00008888
#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
@@ -3061,6 +3613,12 @@
{
return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
}
+#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
+}
#define REG_A6XX_RB_STENCILWRMASK 0x00008889
#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
@@ -3069,6 +3627,12 @@
{
return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
}
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
+}
#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3088,6 +3652,9 @@
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
@@ -3120,6 +3687,14 @@
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
+#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
@@ -3130,6 +3705,12 @@
return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3177,14 +3758,14 @@
#define REG_A6XX_RB_BLIT_INFO 0x000088e3
#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
-#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
-#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
-#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
-#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
-static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
{
- return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+ return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
}
#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
@@ -3226,6 +3807,9 @@
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -3274,12 +3858,16 @@
#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04
+
#define REG_A6XX_RB_CCU_CNTL 0x00008e07
#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+
#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
@@ -3385,6 +3973,9 @@
#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+#define REG_A6XX_VPC_SO_OVERRIDE 0x00009306
+#define A6XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
@@ -3397,8 +3988,14 @@
#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+#define REG_A6XX_PC_UNKNOWN_9806 0x00009806
+
+#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
+
#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+#define REG_A6XX_PC_UNKNOWN_9990 0x00009990
+
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
@@ -3410,6 +4007,7 @@
{
return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
}
+#define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE 0x00000100
#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
@@ -3488,6 +4086,8 @@
#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+#define REG_A6XX_VFD_UNKNOWN_A009 0x0000a009
+
#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
@@ -3640,6 +4240,8 @@
#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b
+
#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
@@ -3884,6 +4486,8 @@
#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A982 0x0000a982
+
#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
@@ -3891,6 +4495,7 @@
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
@@ -3979,7 +4584,8 @@
}
#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A6XX_SP_UNKNOWN_A99E 0x0000a99e
#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
@@ -4066,14 +4672,22 @@
#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+
+#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
+
#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -4097,6 +4711,8 @@
#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+#define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309
+
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4117,11 +4733,34 @@
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
@@ -4162,6 +4801,8 @@
return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
}
+#define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980
+
#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
@@ -4448,6 +5089,12 @@
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -4537,11 +5184,11 @@
}
#define REG_A6XX_TEX_CONST_8 0x00000008
-#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
-#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
{
- return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+ return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
}
#define REG_A6XX_TEX_CONST_9 0x00000009
@@ -4558,5 +5205,231 @@
#define REG_A6XX_TEX_CONST_15 0x0000000f
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126..85f14fe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,13 +2,32 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/iopoll.h>
+#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
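
a6xx_gmu_fault() is called from the two hard-IRQ handlers below, so it only records the hang, stops the hangcheck timer and defers the actual reset to the driver workqueue. As a standalone sketch of that deferral pattern (my_dev and its members are hypothetical, not driver fields; the real handler uses gpu->hangcheck_timer, priv->wq and gpu->recover_work as shown above):

  static irqreturn_t my_fault_irq(int irq, void *data)
  {
          struct my_dev *d = data;        /* hypothetical device struct */

          /* Note the fault, keep the watchdog from racing the reset */
          d->hung = true;
          del_timer(&d->watchdog_timer);

          /* Heavy recovery work runs later, in process context */
          queue_work(d->wq, &d->recover_work);

          return IRQ_HANDLED;
  }
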
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
@@ -20,8 +39,7 @@
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
@@ -42,35 +60,57 @@
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
- if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
- tasklet_schedule(&gmu->hfi_tasklet);
-
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
return IRQ_HANDLED;
}
-/* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
- u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
+/* Check to see if the GX rail is still powered */
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret;
+
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
- ((index << 24) & 0xff) | (3 & 0xf));
+ ((3 & 0xf) << 28) | index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@@ -82,7 +122,43 @@
a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
- return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ if (ret)
+ dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+ gmu->freq = gmu->gpu_freqs[index];
+
+ /*
+ * Eventually we will want to scale the path vote with the frequency but
+ * for now leave it at max so that the performance is nominal.
+ */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+}
+
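
The DCVS write above also corrects the perf-index encoding. Worked out numerically for index = 2 (arithmetic only, no extra hardware semantics assumed):

  /*
   *   old: ((2 << 24) & 0xff) | (3 & 0xf)  == 0x00000003
   *        (the index is masked away, so every vote wrote the same value)
   *   new: ((3 & 0xf) << 28) | 2           == 0x30000002
   *        (index in the low bits, the 0x3 field in bits 31:28)
   */
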
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index = 0;
+
+ if (freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ __a6xx_gmu_set_freq(gmu, perf_index);
+}
+
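
A quick illustration of the lookup in a6xx_gmu_set_freq() with made-up table contents (not from the patch): because the loop bound is nr_gpu_freqs - 1, a frequency that is not in the table falls through to the highest perf level rather than failing.

  /*
   *   gpu_freqs[] = { 300000000, 500000000, 710000000 }, nr_gpu_freqs = 3
   *
   *   freq == 500000000  ->  breaks out with perf_index = 1
   *   freq == 123456789  ->  loop completes, perf_index = 2 (highest)
   */
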
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
@@ -106,10 +182,8 @@
}
/* Wait for the GMU to get to its most idle state */
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-
return spin_until(a6xx_gmu_check_idle_level(gmu));
}
@@ -125,7 +199,7 @@
val == 0xbabeface, 100, 10000);
if (ret)
- dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
@@ -135,15 +209,12 @@
u32 val;
int ret;
- gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
-
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to start the HFI queues\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
@@ -184,7 +255,7 @@
val & (1 << ack), 100, 10000);
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -226,7 +297,7 @@
(val & 0x38) == 0x28, 1, 100);
if (ret) {
- dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -248,7 +319,7 @@
(val & 0x04), 100, 10000);
if (ret)
- dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -292,7 +363,7 @@
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
- dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
@@ -314,23 +385,27 @@
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
- dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
- if (!ret) {
- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
- /* Re-enable the power counter */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
- return 0;
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
}
- dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
- return ret;
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Set up CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+ /* Enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
}
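
The counter setup above relies on gmu_rmw(), whose definition (visible in the a6xx_gmu.h hunks below) reads the register, clears the mask bits and ORs in the new value. The call with mask 0xff and value 0x20 therefore expands to roughly:

  u32 val = gmu_read(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0);

  val &= ~0xff;   /* clear bits [7:0], presumably the counter-0 source select */
  gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, val | 0x20);
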
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -343,13 +418,28 @@
ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
+static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+{
+ return msm_writel(value, ptr + (offset << 2));
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name);
+
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+ void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+
+ if (!pdcptr || !seqptr)
+ goto err;
+
/* Disable SDE clock gating */
gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
@@ -374,44 +464,50 @@
gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
/* Load PDC sequencer uCode for power up and power down sequence */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
/* Set TCS commands used by PDC sequence for low power modes */
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
/* ensure no writes happen before the uCode is fully written */
wmb();
+
+err:
+ if (!IS_ERR_OR_NULL(pdcptr))
+ iounmap(pdcptr);
+ if (!IS_ERR_OR_NULL(seqptr))
+ iounmap(seqptr);
}
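
pdc_write(), defined just above, takes a register index and shifts it left by 2 to form the byte offset, so the +4/+8 strides in the TCS writes step through what look like four-register command slots. Under that assumption the unrolled TCS1 block is equivalent to a small table-driven loop (values copied from the writes above; a sketch, not a proposed change):

  static const struct { u32 addr; u32 data; } tcs1_cmds[] = {
          { 0x30010, 1 },
          { 0x30000, 0 },
          { 0x30080, 0 },
  };
  int i;

  for (i = 0; i < ARRAY_SIZE(tcs1_cmds); i++) {
          pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + i * 4, 0x10108);
          pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + i * 4, tcs1_cmds[i].addr);
          pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + i * 4, tcs1_cmds[i].data);
  }
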
/*
@@ -476,7 +572,7 @@
/* Sanity check the size of the firmware that was loaded */
if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
@@ -488,7 +584,7 @@
if (!rpmh_init) {
a6xx_gmu_rpmh_init(gmu);
rpmh_init = true;
- } else if (state != GMU_RESET) {
+ } else {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -547,28 +643,13 @@
}
#define A6XX_HFI_IRQ_MASK \
- (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
- A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+ (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
-static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
-{
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
-
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
- ~A6XX_GMU_IRQ_MASK);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- ~A6XX_HFI_IRQ_MASK);
-
- enable_irq(gmu->gmu_irq);
- enable_irq(gmu->hfi_irq);
-}
-
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
disable_irq(gmu->gmu_irq);
@@ -578,21 +659,10 @@
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int ret;
u32 val;
- /* Flush all the queues */
- a6xx_hfi_stop(gmu);
-
- /* Stop the interrupts */
- a6xx_gmu_irq_disable(gmu);
-
- /* Force off SPTP in case the GMU is managing it */
- a6xx_sptprac_disable(gmu);
-
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
@@ -602,57 +672,54 @@
(val & 1), 100, 10000);
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
+}
- /* Force off the GX GSDC */
- regulator_force_disable(gmu->gx);
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
- /* Disable the resources */
- clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
- pm_runtime_put_sync(gmu->dev);
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
- /* Re-enable the resources */
- pm_runtime_get_sync(gmu->dev);
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
- /* Use a known rate to bring up the GMU */
- clk_set_rate(gmu->core_clk, 200000000);
- ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
-
- a6xx_gmu_irq_enable(gmu);
-
- ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
- if (!ret)
- ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
-
- /* Set the GPU back to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
-
-out:
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
-
- return ret;
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
- if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
return 0;
+ gmu->hung = false;
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
+ if (ret) {
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
- a6xx_gmu_irq_enable(gmu);
+ /* Set the bus quota to a reasonable value for boot */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
@@ -663,14 +730,35 @@
goto out;
ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
/* Set the GPU to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get(gmu->gxpd);
out:
- /* Make sure to turn off the boot OOB request on error */
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->dev);
+ }
return ret;
}
@@ -679,7 +767,7 @@
{
u32 reg;
- if (!gmu->mmio)
+ if (!gmu->initialized)
return true;
reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
@@ -690,9 +778,12 @@
return true;
}
-int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
u32 val;
/*
@@ -702,10 +793,19 @@
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+ int ret = a6xx_gmu_wait_for_idle(gmu);
- /* Temporary until we can recover safely */
- BUG_ON(ret);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
+ == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
@@ -721,7 +821,7 @@
*/
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -737,6 +837,36 @@
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+}
+
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
+
+ /* Remove the bus vote */
+ icc_set_bw(gpu->icc_path, 0, 0);
+
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
@@ -800,7 +930,7 @@
IOMMU_READ | IOMMU_WRITE);
if (ret) {
- dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1 ; i >= 0; i--)
iommu_unmap(gmu->domain,
@@ -859,46 +989,21 @@
return ret;
}
-/* Get the list of RPMh voltage levels from cmd-db */
-static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
-{
- u32 len = cmd_db_read_aux_data_len(id);
-
- if (!len)
- return 0;
-
- if (WARN_ON(len > size))
- return -EINVAL;
-
- cmd_db_read_aux_data(id, vals, len);
-
- /*
- * The data comes back as an array of unsigned shorts so adjust the
- * count accordingly
- */
- return len >> 1;
-}
-
/* Return the 'arc-level' for the given frequency */
-static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_node *np;
- u32 val = 0;
+ unsigned int val;
if (!freq)
return 0;
- opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp))
return 0;
- np = dev_pm_opp_get_of_node(opp);
-
- if (np) {
- of_property_read_u32(np, "qcom,level", &val);
- of_node_put(np);
- }
+ val = dev_pm_opp_get_level(opp);
dev_pm_opp_put(opp);
@@ -906,16 +1011,35 @@
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
- unsigned long *freqs, int freqs_count,
- u16 *pri, int pri_count,
- u16 *sec, int sec_count)
+ unsigned long *freqs, int freqs_count, const char *id)
{
int i, j;
+ const u16 *pri, *sec;
+ size_t pri_count, sec_count;
+
+ pri = cmd_db_read_aux_data(id, &pri_count);
+ if (IS_ERR(pri))
+ return PTR_ERR(pri);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ pri_count >>= 1;
+ if (!pri_count)
+ return -EINVAL;
+
+ sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ if (IS_ERR(sec))
+ return PTR_ERR(sec);
+
+ sec_count >>= 1;
+ if (!sec_count)
+ return -EINVAL;
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
u8 pindex = 0, sindex = 0;
- u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
/* Get the primary index that matches the arc level */
for (j = 0; j < pri_count; j++) {
@@ -926,12 +1050,12 @@
}
if (j == pri_count) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"Level %u not found in in the RPMh list\n",
level);
- dev_err(dev, "Available levels:\n");
+ DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
- dev_err(dev, " %u\n", pri[j]);
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
@@ -969,25 +1093,15 @@
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
-
- u16 gx[16], cx[16], mx[16];
- u32 gxcount, cxcount, mxcount;
int ret;
- /* Get the list of available voltage levels for each component */
- gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
- cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
- mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
-
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
- gmu->gpu_freqs, gmu->nr_gpu_freqs,
- gx, gxcount, mx, mxcount);
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
- gmu->gmu_freqs, gmu->nr_gmu_freqs,
- cx, cxcount, mx, mxcount);
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
return ret;
}
@@ -1038,7 +1152,7 @@
*/
ret = dev_pm_opp_of_add_table(gmu->dev);
if (ret) {
- dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
@@ -1058,7 +1172,7 @@
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
- int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+ int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
if (ret < 1)
return ret;
@@ -1079,13 +1193,13 @@
IORESOURCE_MEM, name);
if (!res) {
- dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
- ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ ret = ioremap(res->start, resource_size(res));
if (!ret) {
- dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
@@ -1099,10 +1213,10 @@
irq = platform_get_irq_byname(pdev, name);
- ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
- name, gmu);
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
if (ret) {
- dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+ name, ret);
return ret;
}
@@ -1115,21 +1229,35 @@
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- if (IS_ERR_OR_NULL(gmu->mmio))
+ if (!gmu->initialized)
return;
- pm_runtime_disable(gmu->dev);
- a6xx_gmu_stop(a6xx_gpu);
+ pm_runtime_force_suspend(gmu->dev);
- a6xx_gmu_irq_disable(gmu);
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
+ iounmap(gmu->mmio);
+ gmu->mmio = NULL;
+
a6xx_gmu_memory_free(gmu, gmu->hfi);
iommu_detach_device(gmu->domain, gmu->dev);
iommu_domain_free(gmu->domain);
+
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ gmu->initialized = false;
}
-int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
@@ -1140,52 +1268,50 @@
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, false);
+ of_dma_configure(gmu->dev, node, true);
 	/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
- gmu->gx = devm_regulator_get(gmu->dev, "vdd");
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
if (ret)
- return ret;
+ goto err_put_device;
/* Set up the IOMMU context bank */
ret = a6xx_gmu_memory_probe(gmu);
if (ret)
- return ret;
+ goto err_put_device;
 	/* Allocate memory for the HFI queues */
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->hfi))
- goto err;
+ goto err_memory;
/* Allocate memory for the GMU debug region */
gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
if (IS_ERR(gmu->debug))
- goto err;
+ goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
-
- /* Map the GPU power domain controller registers */
- gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
-
- if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
- goto err;
+ if (IS_ERR(gmu->mmio))
+ goto err_memory;
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
- goto err;
+ goto err_mmio;
- /* Set up a tasklet to handle GMU HFI responses */
- tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
@@ -1193,8 +1319,15 @@
/* Set up the HFI queues */
a6xx_hfi_init(gmu);
+ gmu->initialized = true;
+
return 0;
-err:
+
+err_mmio:
+ iounmap(gmu->mmio);
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+err_memory:
a6xx_gmu_memory_free(gmu, gmu->hfi);
if (gmu->domain) {
@@ -1202,6 +1335,11 @@
iommu_domain_free(gmu->domain);
}
+ ret = -ENODEV;
- return -ENODEV;
+err_put_device:
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d9a386c..39a26dd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -4,6 +4,7 @@
#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@@ -26,9 +27,6 @@
/* the GMU is coming up for the first time or back from a power collapse */
#define GMU_COLD_BOOT 1
-/* The GMU is being soft reset after a fault */
-#define GMU_RESET 2
-
/*
* These define the level of control that the GMU has - the higher the number
* the more things that the GMU hardware controls on its own.
@@ -47,16 +45,15 @@
struct device *dev;
void * __iomem mmio;
- void * __iomem pdc_mmio;
int hfi_irq;
int gmu_irq;
- struct regulator *gx;
-
struct iommu_domain *domain;
u64 uncached_iova_base;
+ struct device *gxpd;
+
int idle_level;
struct a6xx_gmu_bo *hfi;
@@ -74,9 +71,12 @@
unsigned long gmu_freqs[4];
u32 cx_arc_votes[4];
+ unsigned long freq;
+
struct a6xx_hfi_queue queues[2];
- struct tasklet_struct hfi_tasklet;
+ bool initialized;
+ bool hung;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -89,11 +89,6 @@
return msm_writel(value, gmu->mmio + (offset << 2));
}
-static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
-{
- return msm_writel(value, gmu->pdc_mmio + (offset << 2));
-}
-
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -103,6 +98,16 @@
gmu_write(gmu, reg, val | or);
}
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
@@ -157,6 +162,7 @@
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
-void a6xx_hfi_task(unsigned long data);
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index ef68098..1cc1c13 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -167,8 +167,8 @@
#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c629f74..dc8ec2c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -4,9 +4,14 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include <linux/devfreq.h>
+
+#define GPU_PAS_ID 13
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -65,13 +70,36 @@
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
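Reading the packet built above under the usual CP_REG_TO_MEM layout (source register in the low bits, dword count at bit 18, a 64-bit flag at bit 30), counter | (1 << 30) | (2 << 18) asks the CP to copy both dwords of the 64-bit counter into the ring's stats slot at iova; the bit positions are inferred here, not spelled out by this patch.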
+
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+ * For PM4 the GMU register offsets are calculated from the base of the
+ * GPU registers so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_start));
+
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -87,6 +115,7 @@
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@ -96,6 +125,11 @@
}
}
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_end));
+
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
@@ -110,6 +144,10 @@
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
+ trace_msm_gpu_submit_flush(submit,
+ gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+ REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
a6xx_flush(gpu, ring);
}
@@ -298,6 +336,8 @@
return ret;
}
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -306,6 +346,20 @@
return 0;
}
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
@@ -338,6 +392,20 @@
REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+ /* Turn on 64 bit addressing for all blocks */
+ gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
@@ -385,14 +453,6 @@
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- /* FIXME: not sure if this should live here or in a6xx_gmu.c */
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
- 0xff000000);
- gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
- 0xff, 0x20);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
- 0x01);
-
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -438,10 +498,8 @@
gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x8d0, 0x23));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -464,7 +522,28 @@
if (ret)
goto out;
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+ * have no resource but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ }
out:
/*
@@ -481,7 +560,7 @@
static void a6xx_dump(struct msm_gpu *gpu)
{
- dev_info(&gpu->pdev->dev, "status: %08x\n",
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -498,7 +577,7 @@
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
- dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
@@ -645,44 +724,21 @@
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-static const u32 a6xx_registers[] = {
- 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
- 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
- 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
- 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
- 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
- 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
- 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
- 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
- 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
- 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
- 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
- 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
- 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
- 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
- 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
- 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
- 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
- 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
- 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
- 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
- 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
- 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
- 0xa610, 0xa617, 0xa630, 0xa630,
- ~0
-};
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- ret = a6xx_gmu_resume(a6xx_gpu);
-
gpu->needs_hw_init = true;
- return ret;
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ if (ret)
+ return ret;
+
+ msm_gpu_resume_devfreq(gpu);
+
+ return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
@@ -690,17 +746,7 @@
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(a6xx_gpu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
return a6xx_gmu_stop(a6xx_gpu);
}
@@ -720,14 +766,6 @@
return 0;
}
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
-{
- adreno_show(gpu, state, p);
-}
-#endif
-
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -742,9 +780,8 @@
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- if (a6xx_gpu->sqe_iova)
- msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
a6xx_gmu_remove(a6xx_gpu);
@@ -753,6 +790,27 @@
kfree(a6xx_gpu);
}
+static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles, busy_time;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
+ do_div(busy_time, 192);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
+}
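The conversion above, in plain terms: the power counter is assumed to tick at the 19.2 MHz XO rate (which is what the * 10 / 192 scaling implies), so microseconds = cycles / 19.2 = cycles * 10 / 192. A minimal standalone sketch of the same math using the kernel's do_div() from asm/div64.h; the helper name is made up for illustration:

static inline u64 example_xo_cycles_to_usecs(u64 cycles)
{
	u64 usecs = cycles * 10;	/* scale first so the division stays integer */

	do_div(usecs, 192);		/* assumed 19.2 MHz tick -> microseconds */
	return usecs;
}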
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -765,9 +823,16 @@
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gmu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -789,7 +854,7 @@
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
- adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
@@ -799,12 +864,12 @@
}
/* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
- ret = a6xx_gmu_probe(a6xx_gpu, node);
+ ret = a6xx_gmu_init(a6xx_gpu, node);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index dd69e5b..6439955 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,15 +46,23 @@
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
-int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 0000000..e686331
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct list_head objs;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
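As an aside, a minimal sketch of how a crashdumper script is assembled with the helpers above: the script is a flat array of u64 pairs, terminated by CRASHDUMP_FINI. The helper name and register offsets below are placeholders for illustration only, not real A6XX registers:

static int example_build_crashdump_script(u64 *in, u64 data_iova)
{
	int count = 0;

	/* program a (hypothetical) selector register with the value 1 */
	count += CRASHDUMP_WRITE(in + count, 0x1234, 1);

	/* copy four dwords from a (hypothetical) register offset into the
	 * scratch buffer at data_iova
	 */
	count += CRASHDUMP_READ(in + count, 0x2000, 4, data_iova);

	/* terminate the script */
	count += CRASHDUMP_FINI(in + count);

	return count;	/* six u64 slots consumed in total */
}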
+
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
+void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
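For reference, with the block counts defined above this works out to (16 * 2) + (18 * 5) + (12 * 4) = 170 dwords captured per VBIF debugbus dump.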
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+ /* Set up the CX debug bus - it lives elsewhere in the system so do a
+ * temporary ioremap for the registers
+ */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_debugbus_blocks),
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+ }
+
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
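For reference, the register lists consumed by RANGE() are stored as inclusive { start, end } pairs, so a pair such as { 0x8000, 0x8006 } spans 0x8006 - 0x8000 + 1 = 7 registers.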
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (cluster->id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+ in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+ block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gmu_read(gmu,
+ regs->registers[i] + j);
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 2, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 2;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0]);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1]);
+}
+
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist);
+ int index = 0;
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper dumper = { 0 };
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+ /* If GX isn't on the rest of the data isn't going to be accessible */
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /* Try to initialize the crashdumper */
+ if (!a6xx_crashdumper_init(gpu, &dumper)) {
+ a6xx_get_registers(gpu, a6xx_state, &dumper);
+ a6xx_get_shaders(gpu, a6xx_state, &dumper);
+ a6xx_get_clusters(gpu, a6xx_state, &dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ }
+
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ return &a6xx_state->base;
+}
+
+void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+ kfree(obj);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 0000000..68cccfa
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
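
Editorial note, not part of the patch: each REGS() entry carries an optional select register (val0) and select value (val1) alongside its (start, end) address pairs. A hedged sketch of how such an entry is presumably consumed when dumping state follows; the function name and output-buffer handling are illustrative only, and gpu_read()/gpu_write() are the existing msm_gpu MMIO helpers.

/*
 * Sketch only: dump one a6xx_registers entry by programming its select
 * register (if any) and then reading every inclusive (start, end) range.
 * Buffer sizing and error handling are omitted for brevity.
 */
static void sketch_dump_reglist(struct msm_gpu *gpu,
		const struct a6xx_registers *regs, u32 *out)
{
	size_t i;
	u32 reg;

	/* val0/val1 double as select-register/select-value for sub-blocks */
	if (regs->val0)
		gpu_write(gpu, regs->val0, regs->val1);

	for (i = 0; i < regs->count; i += 2)
		for (reg = regs->registers[i]; reg <= regs->registers[i + 1]; reg++)
			*out++ = gpu_read(gpu, reg);
}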
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+ REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GPU RSCC */
+ 0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+ 0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+ 0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+} a6xx_indexed_reglist[] = {
+ { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+ "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
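
Editorial note, not part of the patch: the a6xx_indexed_registers entries describe register pairs where one register selects an index and the other returns data, with the data register auto-incrementing on each read. A hedged sketch of that access pattern is below; the function name and buffer handling are illustrative, not copied from the driver.

/*
 * Sketch only: reset the index to 0, then read 'count' dwords back from the
 * auto-incrementing data register.
 */
static void sketch_read_indexed(struct msm_gpu *gpu,
		const struct a6xx_indexed_registers *indexed, u32 *out)
{
	u32 i;

	gpu_write(gpu, indexed->addr, 0);

	for (i = 0; i < indexed->count; i++)
		out[i] = gpu_read(gpu, indexed->data);
}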
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index f19ef4c..eda11ab 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -79,83 +79,72 @@
return 0;
}
-struct a6xx_hfi_response {
- u32 id;
- u32 seqnum;
- struct list_head node;
- struct completion complete;
-
- u32 error;
- u32 payload[16];
-};
-
-/*
- * Incoming HFI ack messages can come in out of order so we need to store all
- * the pending messages on a list until they are handled.
- */
-static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
-static LIST_HEAD(hfi_ack_list);
-
-static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
{
- struct a6xx_hfi_response *resp;
- u32 id, seqnum;
-
- /* msg->ret_header contains the header of the message being acked */
- id = HFI_HEADER_ID(msg->ret_header);
- seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
-
- spin_lock(&hfi_ack_lock);
- list_for_each_entry(resp, &hfi_ack_list, node) {
- if (resp->id == id && resp->seqnum == seqnum) {
- resp->error = msg->error;
- memcpy(resp->payload, msg->payload,
- sizeof(resp->payload));
-
- complete(&resp->complete);
- spin_unlock(&hfi_ack_lock);
- return;
- }
- }
- spin_unlock(&hfi_ack_lock);
-
- dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
-}
-
-static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
-{
- struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
-
- dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
-}
-
-void a6xx_hfi_task(unsigned long data)
-{
- struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- struct a6xx_hfi_msg_response resp;
+ u32 val;
+ int ret;
+
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return -ETIMEDOUT;
+ }
+
+ /* Clear the interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
for (;;) {
- u32 id;
- int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ struct a6xx_hfi_msg_response resp;
+
+ /* Get the next packet */
+ ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
sizeof(resp) >> 2);
- /* Returns the number of bytes copied or negative on error */
- if (ret <= 0) {
- if (ret < 0)
- dev_err(gmu->dev,
- "Unable to read the HFI message queue\n");
- break;
+ /* If the queue is empty our response never made it */
+ if (!ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+
+ return -ENOENT;
}
- id = HFI_HEADER_ID(resp.header);
+ if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
+ struct a6xx_hfi_msg_error *error =
+ (struct a6xx_hfi_msg_error *) &resp;
- if (id == HFI_F2H_MSG_ACK)
- a6xx_hfi_handle_ack(gmu, &resp);
- else if (id == HFI_F2H_MSG_ERROR)
- a6xx_hfi_handle_error(gmu, &resp);
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
+ error->code);
+ continue;
+ }
+
+ if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Unexpected message id %d on the response queue\n",
+ HFI_HEADER_SEQNUM(resp.ret_header));
+ continue;
+ }
+
+ if (resp.error) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ /* All is well, copy over the buffer */
+ if (payload && payload_size)
+ memcpy(payload, resp.payload,
+ min_t(u32, payload_size, sizeof(resp.payload)));
+
+ return 0;
}
}
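
Editorial note, not part of the patch: the rewrite above drops the tasklet and pending-ack list in favour of polling the GMU-to-host interrupt synchronously via gmu_poll_timeout(). That helper is expected to be a thin readl_poll_timeout() wrapper over the GMU MMIO region (declared in a6xx_gmu.h), mirroring the gpu_poll_timeout() macro this patch adds to adreno_gpu.h. Assumed shape, shown as a sketch:

/* requires <linux/iopoll.h>; dword register offsets, hence the << 2 */
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)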
@@ -163,7 +152,6 @@
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
- struct a6xx_hfi_response resp = { 0 };
int ret, dwords = size >> 2;
u32 seqnum;
@@ -173,53 +161,14 @@
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
- init_completion(&resp.complete);
- resp.id = id;
- resp.seqnum = seqnum;
-
- spin_lock_bh(&hfi_ack_lock);
- list_add_tail(&resp.node, &hfi_ack_list);
- spin_unlock_bh(&hfi_ack_lock);
-
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
- dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
- goto out;
- }
-
- /* Wait up to 5 seconds for the response */
- ret = wait_for_completion_timeout(&resp.complete,
- msecs_to_jiffies(5000));
- if (!ret) {
- dev_err(gmu->dev,
- "Message %s id %d timed out waiting for response\n",
- a6xx_hfi_msg_id[id], seqnum);
- ret = -ETIMEDOUT;
- } else
- ret = 0;
-
-out:
- spin_lock_bh(&hfi_ack_lock);
- list_del(&resp.node);
- spin_unlock_bh(&hfi_ack_lock);
-
- if (ret)
return ret;
-
- if (resp.error) {
- dev_err(gmu->dev, "Message %s id %d returned error %d\n",
- a6xx_hfi_msg_id[id], seqnum, resp.error);
- return -EINVAL;
}
- if (payload && payload_size) {
- int copy = min_t(u32, payload_size, sizeof(resp.payload));
-
- memcpy(payload, resp.payload, copy);
- }
-
- return 0;
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
@@ -368,7 +317,7 @@
continue;
if (queue->header->read_index != queue->header->write_index)
- dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 5dace13..641d3ba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -339,6 +339,15 @@
#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
#define REG_AXXX_CP_INT_STATUS 0x000001f3
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7d3e9a1..0888e0d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "adreno_gpu.h"
@@ -27,6 +16,39 @@
static const struct adreno_info gpulist[] = {
{
+ .rev = ADRENO_REV(2, 0, 0, 0),
+ .revn = 200,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, { /* a200 on i.mx51 has only 128kib gmem */
+ .rev = ADRENO_REV(2, 0, 0, 1),
+ .revn = 201,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .revn = 220,
+ .name = "A220",
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
@@ -112,6 +134,24 @@
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
+ .rev = ADRENO_REV(5, 4, 0, 2),
+ .revn = 540,
+ .name = "A540",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a540_gpmu.fw2",
+ },
+ .gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a540_zap.mdt",
+ }, {
.rev = ADRENO_REV(6, 3, 0, ANY_ID),
.revn = 630,
.name = "A630",
@@ -120,7 +160,9 @@
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
},
};
@@ -139,6 +181,7 @@
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_zap.mbn");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -195,7 +238,8 @@
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
- dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ pm_runtime_put_sync(&pdev->dev);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
@@ -204,7 +248,7 @@
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
@@ -237,7 +281,8 @@
if (ret == 0) {
unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
rev->core = r / 100;
r %= 100;
rev->major = r / 10;
@@ -252,7 +297,7 @@
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", &chipid);
if (ret) {
- dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
@@ -273,6 +318,7 @@
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu *gpu;
int ret;
@@ -295,6 +341,8 @@
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
+ priv->is_a2xx = config.rev.core == 2;
+
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -311,7 +359,7 @@
{
struct msm_gpu *gpu = dev_get_drvdata(dev);
- gpu->funcs->pm_suspend(gpu);
+ pm_runtime_force_suspend(dev);
gpu->funcs->destroy(gpu);
set_gpu_pdev(dev_get_drvdata(master), NULL);
@@ -322,9 +370,37 @@
.unbind = adreno_unbind,
};
+static void adreno_device_register_headless(void)
+{
+ /* on imx5, we don't have a top-level mdp/dpu node
+ * this creates a dummy node for the driver for that case
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
static int adreno_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &a3xx_ops);
+
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
}
static int adreno_remove(struct platform_device *pdev)
@@ -336,6 +412,8 @@
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4..048c8be 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -1,30 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/ascii85.h>
+#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+
+ /* Request the MDT file for the firmware */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ if (mem_size > resource_size(&r)) {
+ DRM_DEV_ERROR(dev,
+ "memory region is too small to load the MDT\n");
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir..
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Each GPU has a target specific zap shader firmware name to use */
+ if (!adreno_gpu->info->zapfw) {
+ zap_available = false;
+ DRM_DEV_ERROR(&pdev->dev,
+ "Zap shader firmware file not specified for this target\n");
+ return -ENODEV;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
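
Editorial note, not part of the patch: adreno_zap_shader_load() is intended to be called once from a per-target hw_init path; on success the caller then takes the GPU out of secure mode. A hedged usage sketch follows. GPU_PASS_ID is an assumption (defined locally here; the driver keeps its own value in msm_drv.h), and the secure-mode register write is described in a comment rather than named, since it is target specific.

/* assumption: SCM peripheral id used for the GPU zap shader */
#define GPU_PASS_ID 13

static int example_zap_shader_init(struct msm_gpu *gpu)
{
	static bool loaded;
	int ret;

	if (loaded)
		return 0;

	ret = adreno_zap_shader_load(gpu, GPU_PASS_ID);

	/*
	 * On success a real hw_init would then clear the target's SECVID
	 * trust-control register to take the GPU out of secure mode.
	 */
	loaded = !ret;
	return ret;
}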
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -62,6 +193,12 @@
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ *value = gpu->global_faults;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -89,12 +226,12 @@
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from new location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -109,12 +246,12 @@
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from legacy location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -130,19 +267,19 @@
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s with helper\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
- dev_err(drm->dev, "failed to load %s\n", fwname);
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
@@ -209,14 +346,6 @@
if (!ring)
continue;
- ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
- if (ret) {
- ring->iova = 0;
- dev_err(gpu->dev->dev,
- "could not map ringbuffer %d: %d\n", i, ret);
- return ret;
- }
-
ring->cur = ring->start;
ring->next = ring->start;
@@ -277,7 +406,7 @@
ret = msm_gpu_hw_init(gpu);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
@@ -299,6 +428,7 @@
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
@@ -319,16 +449,27 @@
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
-
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
}
- /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ if (!adreno_is_a2xx(adreno_gpu)) {
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ } else {
+ /* BIT(31) means something else on a2xx */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+ }
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +547,7 @@
size = j + 1;
if (size) {
- state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
@@ -414,6 +555,10 @@
}
}
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
@@ -445,7 +590,7 @@
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
- kfree(state->ring[i].data);
+ kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
@@ -475,34 +620,74 @@
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
- long l, datalen, i;
+ long l;
+ int i;
- if (!ptr || !len)
- return;
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
/*
- * Only dump the non-zero part of the buffer - rarely will any data
- * completely fill the entire allocated size of the buffer
+ * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
+ * account for the worst case of 5 bytes per dword plus the 1 for '\0'
*/
- for (datalen = 0, i = 0; i < len >> 2; i++) {
- if (ptr[i])
- datalen = (i << 2) + 1;
- }
+ buffer_size = (l * 5) + 1;
- /* Skip printing the object if it is empty */
- if (datalen == 0)
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
+
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
return;
- l = ascii85_encode_len(datalen);
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
- for (i = 0; i < l; i++)
- drm_puts(p, ascii85_encode(ptr[i], out));
+ drm_puts(p, *ptr);
drm_puts(p, "\n");
}
@@ -534,8 +719,8 @@
drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
- adreno_show_object(p, state->ring[i].data,
- state->ring[i].data_size);
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
@@ -546,17 +731,19 @@
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
- adreno_show_object(p, state->bos[i].data,
- state->bos[i].size);
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
}
}
- drm_puts(p, "registers:\n");
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
- for (i = 0; i < state->nr_registers; i++) {
- drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
- state->registers[i * 2] << 2,
- state->registers[(i * 2) + 1]);
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
}
#endif
@@ -595,6 +782,9 @@
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
+ if (!adreno_gpu->registers)
+ return;
+
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +825,7 @@
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -674,7 +864,7 @@
else {
ret = dev_pm_opp_of_add_table(dev);
if (ret)
- dev_err(dev, "Unable to set the OPP table\n");
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
if (!ret) {
@@ -695,6 +885,11 @@
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+ /* Check for an interconnect path for the bus */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ if (IS_ERR(gpu->icc_path))
+ gpu->icc_path = NULL;
+
return 0;
}
@@ -713,10 +908,12 @@
adreno_gpu->rev = config->rev;
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.irqname = "kgsl_3d0_irq";
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
+ /* maximum range of a2xx mmu */
+ if (adreno_is_a2xx(adreno_gpu))
+ adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
adreno_gpu_config.nr_rings = nr_rings;
@@ -733,10 +930,13 @@
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
+ struct msm_gpu *gpu = &adreno_gpu->base;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ icc_put(gpu->icc_path);
+
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index de6e6ee..c7441fb 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -1,26 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include "msm_gpu.h"
@@ -60,6 +50,7 @@
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+ ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
};
struct adreno_rev {
@@ -154,6 +145,20 @@
__ret; \
})
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 225;
+}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -206,6 +211,11 @@
return gpu->revn == 530;
}
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -237,6 +247,12 @@
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+/*
+ * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
+ * out of secure mode
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
@@ -334,6 +350,7 @@
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +392,9 @@
((1 << 29) \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
#endif /* __ADRENO_GPU_H__ */
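
Editorial note, not part of the patch: the gpu_poll_timeout() macro added above lets per-target code spin on a GPU register until a condition holds or a timeout expires. A hedged usage sketch is below; the register offset and busy bit are placeholders defined locally, not taken from this patch.

#define REG_EXAMPLE_GPU_STATUS		0x0201		/* placeholder offset */
#define EXAMPLE_GPU_STATUS_BUSY		BIT(23)		/* placeholder bit */

/* poll every 100 us, give up after 5000 us; returns 0 or -ETIMEDOUT */
static int example_wait_for_idle(struct msm_gpu *gpu)
{
	u32 val;

	return gpu_poll_timeout(gpu, REG_EXAMPLE_GPU_STATUS, val,
		!(val & EXAMPLE_GPU_STATUS_BUSY), 100, 5000);
}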
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 03a91e1..79b907a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,14 +10,14 @@
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -108,6 +108,13 @@
DI_SRC_SEL_RESERVED = 3,
};
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
enum pc_di_index_size {
INDEX_SIZE_IGN = 0,
INDEX_SIZE_16_BIT = 0,
@@ -237,7 +244,7 @@
CP_UNK_A6XX_14 = 20,
CP_UNK_A6XX_36 = 54,
CP_UNK_A6XX_55 = 85,
- UNK_A6XX_6D = 109,
+ CP_REG_WRITE = 109,
};
enum adreno_state_block {
@@ -356,6 +363,7 @@
RM6_GMEM = 4,
RM6_BLIT2D = 5,
RM6_RESOLVE = 6,
+ RM6_BLIT2DSCALE = 12,
};
enum pseudo_reg {
@@ -968,19 +976,19 @@
}
#define REG_CP_SET_BIN_DATA5_5 0x00000005
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_SET_BIN_DATA5_6 0x00000006
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_REG_TO_MEM_0 0x00000000
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 879c13f..cdbea38 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -319,10 +311,8 @@
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
- if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
- DPU_ERROR("invalid parameters\n");
+ if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
return 0;
- }
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +333,11 @@
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
- parent, &dpu_kms->irq_obj,
- &dpu_debugfs_core_irq_fops);
-
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove(dpu_kms->irq_obj.debugfs_file);
- dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+ debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
}
#endif
@@ -376,10 +346,7 @@
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +377,12 @@
}
}
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
- return 0;
-}
-
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 5e98bba..e30775e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DPU_CORE_IRQ_H__
@@ -24,13 +16,6 @@
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms: DPU handle
- * @return: 0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
-/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @return: none
@@ -139,15 +124,8 @@
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
- * @Return: 0 on success
*/
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 41c5191..09a49b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -24,8 +16,6 @@
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
-#define DPU_PERF_MODE_STRING_SIZE 128
-
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +47,20 @@
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
- return dpu_crtc_is_enabled(crtc);
-}
-
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
- bool intf_connected = false;
-
- if (!crtc)
- goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ tmp_crtc->enabled) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
- intf_connected = true;
- goto end;
+ return true;
}
}
-end:
- return intf_connected;
+ return false;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -90,7 +69,6 @@
struct dpu_core_perf_params *perf)
{
struct dpu_crtc_state *dpu_cstate;
- int i;
if (!kms || !kms->catalog || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
@@ -101,35 +79,24 @@
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ perf->bw_ctl = kms->catalog->perf.max_bw_high *
1000ULL;
- perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
- }
+ perf->max_per_pipe_ib = perf->bw_ctl;
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- perf->bw_ctl[i] = 0;
- perf->max_per_pipe_ib[i] = 0;
- }
+ perf->bw_ctl = 0;
+ perf->max_per_pipe_ib = 0;
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
- perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
- }
+ perf->bw_ctl = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
perf->core_clk_rate = kms->perf.fix_core_clk_rate;
}
DPU_DEBUG(
- "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+ perf->max_per_pipe_ib, perf->bw_ctl);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -142,7 +109,6 @@
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
- int i;
if (!crtc || !state) {
DPU_ERROR("invalid crtc\n");
@@ -164,31 +130,25 @@
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
- curr_client_type = dpu_crtc_get_client_type(crtc);
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
+ curr_client_type = dpu_crtc_get_client_type(crtc);
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
- (dpu_crtc_get_client_type(tmp_crtc) ==
- curr_client_type) &&
- (tmp_crtc != crtc)) {
- struct dpu_crtc_state *tmp_cstate =
- to_dpu_crtc_state(tmp_crtc->state);
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (tmp_crtc->enabled &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) && (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
- DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
- tmp_crtc->base.id,
- tmp_cstate->new_perf.bw_ctl[i],
- tmp_cstate->bw_control);
- /*
- * For bw check only use the bw if the
- * atomic property has been already set
- */
- if (tmp_cstate->bw_control)
- bw_sum_of_intfs +=
- tmp_cstate->new_perf.bw_ctl[i];
- }
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has been already set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
/* convert bandwidth to kb */
@@ -219,9 +179,9 @@
}
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
- struct drm_crtc *crtc, u32 bus_id)
+ struct drm_crtc *crtc)
{
- struct dpu_core_perf_params perf = { { 0 } };
+ struct dpu_core_perf_params perf = { 0 };
enum dpu_crtc_client_type curr_client_type
= dpu_crtc_get_client_type(crtc);
struct drm_crtc *tmp_crtc;
@@ -229,18 +189,16 @@
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
- perf.max_per_pipe_ib[bus_id] =
- max(perf.max_per_pipe_ib[bus_id],
- dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+ perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
- DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
- tmp_crtc->base.id, bus_id,
- dpu_cstate->new_perf.bw_ctl[bus_id]);
+ DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl);
}
}
return ret;
@@ -256,11 +214,9 @@
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
- struct drm_crtc *tmp_crtc;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
- int i;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -276,30 +232,15 @@
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
- /* only do this for command mode rt client */
- if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
- /*
- * If video interface present, cmd panel bandwidth cannot be
- * released.
- */
- if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
- dpu_crtc_get_intf_mode(tmp_crtc) ==
- INTF_MODE_VIDEO)
- return;
- }
-
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- dpu_crtc->cur_perf.bw_ctl[i] = 0;
- _dpu_core_perf_crtc_update_bus(kms, crtc, i);
- }
+ dpu_crtc->cur_perf.bw_ctl = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc);
}
}
@@ -321,7 +262,7 @@
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
- if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
@@ -342,11 +283,10 @@
int params_changed, bool stop_req)
{
struct dpu_core_perf_params *new, *old;
- int update_bus = 0, update_clk = 0;
+ bool update_bus = false, update_clk = false;
u64 clk_rate = 0;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
- int i;
struct msm_drm_private *priv;
struct dpu_kms *kms;
int ret;
@@ -372,63 +312,50 @@
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
- if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- /*
- * cases for bus bandwidth update.
- * 1. new bandwidth vote - "ab or ib vote" is higher
- * than current vote for update request.
- * 2. new bandwidth vote - "ab or ib vote" is lower
- * than current vote at end of commit or stop.
- */
- if ((params_changed && ((new->bw_ctl[i] >
- old->bw_ctl[i]) ||
- (new->max_per_pipe_ib[i] >
- old->max_per_pipe_ib[i]))) ||
- (!params_changed && ((new->bw_ctl[i] <
- old->bw_ctl[i]) ||
- (new->max_per_pipe_ib[i] <
- old->max_per_pipe_ib[i])))) {
- DPU_DEBUG(
- "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
- crtc->base.id, params_changed,
- new->bw_ctl[i], old->bw_ctl[i]);
- old->bw_ctl[i] = new->bw_ctl[i];
- old->max_per_pipe_ib[i] =
- new->max_per_pipe_ib[i];
- update_bus |= BIT(i);
- }
+ if (crtc->enabled && !stop_req) {
+ /*
+ * cases for bus bandwidth update.
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than current vote for update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than current vote at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+ (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+ (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+ (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
+ DPU_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl, old->bw_ctl);
+ old->bw_ctl = new->bw_ctl;
+ old->max_per_pipe_ib = new->max_per_pipe_ib;
+ update_bus = true;
}
if ((params_changed &&
- (new->core_clk_rate > old->core_clk_rate)) ||
- (!params_changed &&
- (new->core_clk_rate < old->core_clk_rate))) {
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
old->core_clk_rate = new->core_clk_rate;
- update_clk = 1;
+ update_clk = true;
}
} else {
DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
memset(old, 0, sizeof(*old));
memset(new, 0, sizeof(*new));
- update_bus = ~0;
- update_clk = 1;
+ update_bus = true;
+ update_clk = true;
}
- trace_dpu_perf_crtc_update(crtc->base.id,
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
- new->core_clk_rate, stop_req,
- update_bus, update_clk);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- if (update_bus & BIT(i)) {
- ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
- if (ret) {
- DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
- crtc->base.id, i);
- return ret;
- }
+ trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
+ new->core_clk_rate, stop_req, update_bus, update_clk);
+
+ if (update_bus) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
+ crtc->base.id);
+ return ret;
}
}
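
A standalone sketch of the "raise early, lower late" vote policy described in the comment above: a higher bandwidth request is applied while parameters are still changing, a lower one only once the commit completes (or on stop). Types and values here are illustrative, not the driver's.

/* gcc -Wall vote_policy.c && ./a.out */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct perf_vote {
	uint64_t bw_ctl;          /* arbitrated bandwidth request */
	uint64_t max_per_pipe_ib; /* instantaneous bandwidth request */
};

/* Decide whether the bus vote needs to change right now. */
static bool need_bus_update(const struct perf_vote *old_v,
			    const struct perf_vote *new_v, bool params_changed)
{
	if (params_changed)	/* mid-commit: only raise votes */
		return new_v->bw_ctl > old_v->bw_ctl ||
		       new_v->max_per_pipe_ib > old_v->max_per_pipe_ib;

	/* end of commit or stop: now it is safe to lower votes */
	return new_v->bw_ctl < old_v->bw_ctl ||
	       new_v->max_per_pipe_ib < old_v->max_per_pipe_ib;
}

int main(void)
{
	struct perf_vote old_v = { .bw_ctl = 100, .max_per_pipe_ib = 50 };
	struct perf_vote new_v = { .bw_ctl = 150, .max_per_pipe_ib = 50 };

	printf("higher vote during commit: update=%d\n",
	       need_bus_update(&old_v, &new_v, true));
	new_v.bw_ctl = 80;
	printf("lower vote at commit end:  update=%d\n",
	       need_bus_update(&old_v, &new_v, false));
	return 0;
}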
@@ -462,24 +389,14 @@
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
- char buf[10];
+ int ret;
- if (!perf)
- return -ENODEV;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
-
- if (kstrtouint(buf, 0, &perf_mode))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
- return -EFAULT;
+ return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
@@ -504,29 +421,16 @@
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- int len = 0;
- char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+ int len;
+ char buf[128];
- if (!perf)
- return -ENODEV;
-
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
- if (len < 0 || len >= sizeof(buf))
- return 0;
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
-
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static const struct file_operations dpu_core_perf_mode_fops = {
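
A minimal debugfs module sketch (hypothetical names, not driver code) of the two helpers adopted above: kstrtouint_from_user() parses an integer straight out of the user buffer, and simple_read_from_buffer() handles the count/ppos bookkeeping for reads.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct dentry *demo_dir;
static unsigned int demo_mode;

static ssize_t demo_mode_write(struct file *file, const char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	unsigned int val;
	int ret;

	/* replaces the manual copy_from_user()/kstrtouint() dance */
	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	demo_mode = val;
	return count;
}

static ssize_t demo_mode_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "mode %u\n", demo_mode);

	/* copies to userspace and advances *ppos for us */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations demo_mode_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = demo_mode_read,
	.write = demo_mode_write,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_core_perf", NULL);
	debugfs_create_file("perf_mode", 0600, demo_dir, NULL, &demo_mode_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");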
@@ -535,70 +439,41 @@
.write = _dpu_core_perf_mode_write,
};
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
- debugfs_remove_recursive(perf->debugfs_root);
- perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
-{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
struct dpu_mdss_cfg *catalog = perf->catalog;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
+ struct dentry *entry;
- priv = perf->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
+ entry = debugfs_create_dir("core_perf", parent);
- dpu_kms = to_dpu_kms(priv->kms);
-
- perf->debugfs_root = debugfs_create_dir("core_perf", parent);
- if (!perf->debugfs_root) {
- DPU_ERROR("failed to create core perf debugfs\n");
- return -EINVAL;
- }
-
- debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
- debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
- debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&catalog->perf.max_bw_low);
- debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&catalog->perf.max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&catalog->perf.min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&catalog->perf.min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&catalog->perf.min_dram_ib);
- debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
- debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
-{
- return 0;
-}
#endif
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
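
The hunk above also drops the NULL check on debugfs_create_dir(): the current debugfs API expects callers to ignore creation failures, and an error "dentry" can safely be passed as the parent of later calls. A short sketch of that pattern, with hypothetical struct/field names:

#include <linux/debugfs.h>
#include <linux/types.h>

struct demo_perf {
	u64 max_core_clk_rate;
	u64 core_clk_rate;
	u32 enable_bw_release;
};

void demo_perf_debugfs_init(struct demo_perf *perf, struct dentry *parent)
{
	/* no error checking needed; failures are handled inside debugfs */
	struct dentry *dir = debugfs_create_dir("core_perf", parent);

	debugfs_create_u64("max_core_clk_rate", 0600, dir,
			   &perf->max_core_clk_rate);
	debugfs_create_u64("core_clk_rate", 0600, dir, &perf->core_clk_rate);
	debugfs_create_u32("enable_bw_release", 0600, dir,
			   &perf->enable_bw_release);
}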
@@ -608,10 +483,8 @@
return;
}
- dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
- perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
@@ -619,12 +492,10 @@
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
- perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index fbcbe0c..cf4b9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_CORE_PERF_H_
@@ -19,19 +11,31 @@
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+ DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+ DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+ DPU_CORE_PERF_DATA_BUS_ID_EBI,
+ DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
+/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
- u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 max_per_pipe_ib;
+ u64 bw_ctl;
u64 core_clk_rate;
};
@@ -52,7 +56,6 @@
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +69,6 @@
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@@ -113,21 +115,20 @@
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
+struct dpu_kms;
+
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
* @debugfs_parent: Pointer to parent debugfs
*/
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4752f08..ce59adf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,30 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
-#include <drm/drm_mode.h>
+
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
@@ -33,7 +24,6 @@
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
-#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
@@ -47,239 +37,16 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-#define MISR_BUFF_SIZE 256
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
- struct msm_drm_private *priv;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return NULL;
- }
- priv = crtc->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid kms\n");
- return NULL;
- }
+ struct msm_drm_private *priv = crtc->dev->dev_private;
return to_dpu_kms(priv->kms);
}
-static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_crtc) {
- DPU_ERROR("invalid dpu crtc\n");
- return -EINVAL;
- }
-
- crtc = &dpu_crtc->base;
- if (!crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid drm device\n");
- return -EINVAL;
- }
-
- priv = crtc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
-
-/**
- * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
- * @rp: Pointer to resource pool
- * return: Pointer to drm crtc if success; null otherwise
- */
-static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
-{
- if (!rp)
- return NULL;
-
- return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
-}
-
-/**
- * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
- * @rp: Pointer to resource pool
- * @force: True to reclaim all resources; otherwise, reclaim only unused ones
- * return: None
- */
-static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
-{
- struct dpu_crtc_res *res, *next;
- struct drm_crtc *crtc;
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
- force ? "destroy" : "free_unused");
-
- list_for_each_entry_safe(res, next, &rp->res_list, list) {
- if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
- continue;
- DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, rp->sequence_id,
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- list_del(&res->list);
- if (res->ops.put)
- res->ops.put(res->val);
- kfree(res);
- }
-}
-
-/**
- * _dpu_crtc_rp_free_unused - free unused resource in pool
- * @rp: Pointer to resource pool
- * return: none
- */
-static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- _dpu_crtc_rp_reclaim(rp, false);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_destroy - destroy resource pool
- * @rp: Pointer to resource pool
- * return: None
- */
-static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- list_del_init(&rp->rp_list);
- _dpu_crtc_rp_reclaim(rp, true);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_hw_blk_get - get callback for hardware block
- * @val: Resource handle
- * @type: Resource type
- * @tag: Search tag for given resource
- * return: Resource handle
- */
-static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
-{
- DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
- return dpu_hw_blk_get(val, type, tag);
-}
-
-/**
- * _dpu_crtc_hw_blk_put - put callback for hardware block
- * @val: Resource handle
- * return: None
- */
-static void _dpu_crtc_hw_blk_put(void *val)
-{
- DPU_DEBUG("res://%pK\n", val);
- dpu_hw_blk_put(val);
-}
-
-/**
- * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
- * @rp: Pointer to original resource pool
- * @dup_rp: Pointer to duplicated resource pool
- * return: None
- */
-static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
- struct dpu_crtc_respool *dup_rp)
-{
- struct dpu_crtc_res *res, *dup_res;
- struct drm_crtc *crtc;
-
- if (!rp || !dup_rp || !rp->rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
-
- mutex_lock(rp->rp_lock);
- dup_rp->sequence_id = rp->sequence_id + 1;
- INIT_LIST_HEAD(&dup_rp->res_list);
- dup_rp->ops = rp->ops;
- list_for_each_entry(res, &rp->res_list, list) {
- dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
- if (!dup_res) {
- mutex_unlock(rp->rp_lock);
- return;
- }
- INIT_LIST_HEAD(&dup_res->list);
- atomic_set(&dup_res->refcount, 0);
- dup_res->type = res->type;
- dup_res->tag = res->tag;
- dup_res->val = res->val;
- dup_res->ops = res->ops;
- dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
- DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, dup_rp->sequence_id,
- dup_res->type, dup_res->tag, dup_res->val,
- atomic_read(&dup_res->refcount));
- list_add_tail(&dup_res->list, &dup_rp->res_list);
- if (dup_res->ops.get)
- dup_res->ops.get(dup_res->val, 0, -1);
- }
-
- dup_rp->rp_lock = rp->rp_lock;
- dup_rp->rp_head = rp->rp_head;
- INIT_LIST_HEAD(&dup_rp->rp_list);
- list_add_tail(&dup_rp->rp_list, rp->rp_head);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_reset - reset resource pool after allocation
- * @rp: Pointer to original resource pool
- * @rp_lock: Pointer to serialization resource pool lock
- * @rp_head: Pointer to crtc resource pool head
- * return: None
- */
-static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
- struct mutex *rp_lock, struct list_head *rp_head)
-{
- if (!rp || !rp_lock || !rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
-
- mutex_lock(rp_lock);
- rp->rp_lock = rp_lock;
- rp->rp_head = rp_head;
- INIT_LIST_HEAD(&rp->rp_list);
- rp->sequence_id = 0;
- INIT_LIST_HEAD(&rp->res_list);
- rp->ops.get = _dpu_crtc_hw_blk_get;
- rp->ops.put = _dpu_crtc_hw_blk_put;
- list_add_tail(&rp->rp_list, rp->rp_head);
- mutex_unlock(rp_lock);
-}
-
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -289,22 +56,34 @@
if (!crtc)
return;
- dpu_crtc->phandle = NULL;
-
drm_crtc_cleanup(crtc);
- mutex_destroy(&dpu_crtc->crtc_lock);
kfree(dpu_crtc);
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate)
+ struct dpu_plane_state *pstate, struct dpu_format *format)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
+ uint32_t blend_op;
+ struct drm_format_name_buf format_name;
/* default to opaque blending */
- lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0,
- DPU_BLEND_FG_ALPHA_FG_CONST |
- DPU_BLEND_BG_ALPHA_BG_CONST);
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+
+ if (format->alpha_enable) {
+ /* coverage blending */
+ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_INV_ALPHA;
+ }
+
+ lm->ops.setup_blend_config(lm, pstate->stage,
+ 0xFF, 0, blend_op);
+
+ DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
+ drm_get_format_name(format->base.pixel_format, &format_name),
+ format->alpha_enable, blend_op);
}
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
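
A standalone model of the blend-mode choice made above: constant-alpha (opaque) blending by default, switching to per-pixel "coverage" blending when the plane's format carries an alpha channel. The flag values below are illustrative placeholders, not the DPU register encodings.

/* gcc -Wall blend_op.c && ./a.out */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLEND_FG_ALPHA_FG_CONST (1u << 0)
#define BLEND_BG_ALPHA_BG_CONST (1u << 1)
#define BLEND_FG_ALPHA_FG_PIXEL (1u << 2)
#define BLEND_BG_ALPHA_FG_PIXEL (1u << 3)
#define BLEND_BG_INV_ALPHA      (1u << 4)

static uint32_t pick_blend_op(bool alpha_enable)
{
	if (alpha_enable)	/* coverage: weight bg by inverse fg alpha */
		return BLEND_FG_ALPHA_FG_PIXEL |
		       BLEND_BG_ALPHA_FG_PIXEL |
		       BLEND_BG_INV_ALPHA;

	/* opaque: constant alpha for both layers */
	return BLEND_FG_ALPHA_FG_CONST | BLEND_BG_ALPHA_BG_CONST;
}

int main(void)
{
	printf("XRGB-style format -> blend_op 0x%x\n", pick_blend_op(false));
	printf("ARGB-style format -> blend_op 0x%x\n", pick_blend_op(true));
	return 0;
}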
@@ -317,9 +96,9 @@
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
- struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
struct dpu_hw_mixer_cfg cfg;
if (!lm_roi || !drm_rect_visible(lm_roi))
@@ -339,28 +118,17 @@
struct drm_plane *plane;
struct drm_framebuffer *fb;
struct drm_plane_state *state;
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
- struct dpu_hw_ctl *ctl;
- struct dpu_hw_mixer *lm;
- struct dpu_hw_stage_cfg *stage_cfg;
+ struct dpu_hw_ctl *ctl = mixer->lm_ctl;
+ struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
- if (!dpu_crtc || !mixer) {
- DPU_ERROR("invalid dpu_crtc or mixer\n");
- return;
- }
-
- ctl = mixer->hw_ctl;
- lm = mixer->hw_lm;
- stage_cfg = &dpu_crtc->stage_cfg;
- cstate = to_dpu_crtc_state(crtc->state);
-
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -379,10 +147,6 @@
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
- if (!format) {
- DPU_ERROR("invalid format\n");
- return;
- }
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
@@ -400,8 +164,9 @@
fb ? fb->modifier : 0);
/* blend config update */
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
+ pstate, format);
mixer[lm_idx].flush_mask |= flush_mask;
@@ -422,38 +187,25 @@
*/
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *dpu_crtc_state;
- struct dpu_crtc_mixer *mixer;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
-
int i;
- if (!crtc)
- return;
-
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_crtc_state = to_dpu_crtc_state(crtc->state);
- mixer = dpu_crtc->mixers;
-
DPU_DEBUG("%s\n", dpu_crtc->name);
- if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
- DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
- return;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ for (i = 0; i < cstate->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
DPU_ERROR("invalid lm or ctl assigned to mixer\n");
return;
}
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
- if (mixer[i].hw_ctl->ops.clear_all_blendstages)
- mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl);
+ if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+ mixer[i].lm_ctl->ops.clear_all_blendstages(
+ mixer[i].lm_ctl);
}
/* initialize stage cfg */
@@ -461,8 +213,8 @@
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- ctl = mixer[i].hw_ctl;
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
lm = mixer[i].hw_lm;
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
@@ -519,16 +271,17 @@
return INTF_MODE_NONE;
}
- drm_for_each_encoder(encoder, crtc->dev)
- if (encoder->crtc == crtc)
- return dpu_encoder_get_intf_mode(encoder);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
@@ -543,34 +296,13 @@
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
- struct msm_drm_private *priv;
- struct dpu_crtc_frame_event *fevent;
- struct drm_crtc *crtc;
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc_frame_event *fevent = container_of(work,
+ struct dpu_crtc_frame_event, work);
+ struct drm_crtc *crtc = fevent->crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
unsigned long flags;
bool frame_done = false;
- if (!work) {
- DPU_ERROR("invalid work handle\n");
- return;
- }
-
- fevent = container_of(work, struct dpu_crtc_frame_event, work);
- if (!fevent->crtc || !fevent->crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- crtc = fevent->crtc;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms) {
- DPU_ERROR("invalid kms handle\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
DPU_ATRACE_BEGIN("crtc_frame_event");
DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
@@ -581,12 +313,7 @@
| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&dpu_crtc->frame_pending) < 1) {
- /* this should not happen */
- DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
- crtc->base.id,
- fevent->event,
- ktime_to_ns(fevent->ts),
- atomic_read(&dpu_crtc->frame_pending));
+ /* ignore vblank when not pending */
} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
@@ -636,11 +363,6 @@
unsigned long flags;
u32 crtc_id;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
/* Nothing to do on idle event */
if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
return;
@@ -669,113 +391,25 @@
kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
-void dpu_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
- if (!crtc || !crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
- struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-
- /* Set up all the mixers and ctls reserved by this encoder */
- for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
- mixer = &dpu_crtc->mixers[i];
-
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
- /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->hw_ctl = last_valid_ctl;
- } else {
- mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->hw_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->hw_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- mixer->encoder = enc;
-
- dpu_crtc->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->hw_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct drm_encoder *enc;
-
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
-
- mutex_lock(&dpu_crtc->crtc_lock);
- /* Check for mixers on all encoders attached to this crtc */
- list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
- struct drm_display_mode *adj_mode;
- u32 crtc_split_width;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
- if (!crtc || !state) {
- DPU_ERROR("invalid args\n");
- return;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(state);
-
- adj_mode = &state->adjusted_mode;
- crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
- r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+ r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
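
A standalone sketch of the mixer-bounds math above: the mode's width is split evenly across the active layer mixers, each covering the full height. It assumes hdisplay divides evenly by the mixer count, as the dual-mixer topology implies.

/* gcc -Wall lm_bounds.c && ./a.out */
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static void setup_lm_bounds(int hdisplay, int vdisplay, int num_mixers,
			    struct rect *bounds)
{
	int split_width = hdisplay / num_mixers;

	for (int i = 0; i < num_mixers; i++) {
		bounds[i].x1 = split_width * i;
		bounds[i].y1 = 0;
		bounds[i].x2 = bounds[i].x1 + split_width;
		bounds[i].y2 = vdisplay;
	}
}

int main(void)
{
	struct rect b[2];

	setup_lm_bounds(2160, 1080, 2, b);	/* dual-mixer 2160x1080 mode */
	for (int i = 0; i < 2; i++)
		printf("lm%d: (%d,%d)-(%d,%d)\n", i,
		       b[i].x1, b[i].y1, b[i].x2, b[i].y2);
	return 0;
}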
@@ -787,6 +421,7 @@
struct drm_crtc_state *old_state)
{
struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
@@ -806,13 +441,11 @@
DPU_DEBUG("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!dpu_crtc->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
@@ -823,20 +456,16 @@
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- /* encoder will trigger pending mask now */
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
- }
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
_dpu_crtc_blend_setup(crtc);
@@ -861,11 +490,6 @@
unsigned long flags;
struct dpu_crtc_state *cstate;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
if (!crtc->state->enable) {
DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
@@ -900,7 +524,7 @@
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
/*
@@ -951,8 +575,6 @@
DPU_DEBUG("crtc%d\n", crtc->base.id);
- _dpu_crtc_rp_destroy(&cstate->rp);
-
__drm_atomic_helper_crtc_destroy_state(state);
kfree(cstate);
@@ -960,15 +582,9 @@
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
int ret, rc = 0;
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
-
if (!atomic_read(&dpu_crtc->frame_pending)) {
DPU_DEBUG("no frames pending\n");
return 0;
@@ -976,7 +592,7 @@
DPU_ATRACE_BEGIN("frame done completion wait");
ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
- msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
if (!ret) {
DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
rc = -ETIMEDOUT;
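
A kernel-context fragment (hypothetical names, intended to compile inside a module) of the wait pattern above: skip the wait when nothing is pending, otherwise block with a bounded timeout and report -ETIMEDOUT if the frame-done completion never fires.

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define DEMO_FRAME_DONE_TIMEOUT_MS	60

struct demo_frame_ctx {
	atomic_t frame_pending;
	struct completion frame_done;	/* completed from the frame-done irq path */
};

static int demo_frame_wait_done(struct demo_frame_ctx *ctx)
{
	if (!atomic_read(&ctx->frame_pending))
		return 0;	/* nothing outstanding, no need to wait */

	/* returns 0 on timeout, remaining jiffies otherwise */
	if (!wait_for_completion_timeout(&ctx->frame_done,
			msecs_to_jiffies(DEMO_FRAME_DONE_TIMEOUT_MS)))
		return -ETIMEDOUT;

	return 0;
}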
@@ -989,62 +605,27 @@
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- struct drm_device *dev;
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct dpu_crtc_state *cstate;
- int ret;
-
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return;
- }
- dev = crtc->dev;
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
- priv = dpu_kms->dev->dev_private;
- cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
* it means we are trying to start a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
DPU_ATRACE_BEGIN("crtc_commit");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct dpu_encoder_kickoff_params params = { 0 };
-
- if (encoder->crtc != crtc)
- continue;
-
- /*
- * Encoder will flush/start now, unless it has a tx pending.
- * If so, it may delay and flush at an irq event (e.g. ppdone)
- */
- dpu_encoder_prepare_for_kickoff(encoder, ¶ms);
- }
-
- /* wait for frame_event_done completion */
- DPU_ATRACE_BEGIN("wait_for_frame_done_event");
- ret = _dpu_crtc_wait_for_frame_done(crtc);
- DPU_ATRACE_END("wait_for_frame_done_event");
- if (ret) {
- DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
- crtc->base.id,
- atomic_read(&dpu_crtc->frame_pending));
- goto end;
- }
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
/* acquire bandwidth and other resources */
@@ -1056,128 +637,21 @@
dpu_vbif_clear_errors(dpu_kms);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_kickoff(encoder);
- }
-end:
reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- *
- * @Return: error code
- */
-static int _dpu_crtc_vblank_enable_no_lock(
- struct dpu_crtc *dpu_crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
{
- struct drm_device *dev;
- struct drm_crtc *crtc;
- struct drm_encoder *enc;
+ struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (!dpu_crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
- crtc = &dpu_crtc->base;
- dev = crtc->dev;
-
- if (enable) {
- int ret;
-
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- ret = _dpu_crtc_power_enable(dpu_crtc, true);
- mutex_lock(&dpu_crtc->crtc_lock);
- if (ret)
- return ret;
-
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc,
- dpu_crtc_vblank_cb, (void *)crtc);
- }
- } else {
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc, NULL, NULL);
- }
-
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
- mutex_lock(&dpu_crtc->crtc_lock);
- }
-
- return 0;
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
-{
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- int ret = 0;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
- priv = crtc->dev->dev_private;
-
- if (!priv->kms) {
- DPU_ERROR("invalid crtc kms\n");
- return;
- }
- dpu_kms = to_dpu_kms(priv->kms);
-
- DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
- mutex_lock(&dpu_crtc->crtc_lock);
-
- /*
- * If the vblank is enabled, release a power reference on suspend
- * and take it back during resume (if it is still enabled).
- */
- trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
- if (dpu_crtc->suspend == enable)
- DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
- crtc->base.id, enable);
- else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
- }
-
- dpu_crtc->suspend = enable;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}
/**
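
A sketch (hypothetical driver names, not dpu's layout) of the reset pattern introduced above for a subclassed CRTC state: free any existing state, allocate a zeroed replacement and hand it to the core helper, which also tolerates a failed allocation.

#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>

struct my_crtc_state {
	struct drm_crtc_state base;	/* must stay the first member */
	u64 core_clk_rate;		/* driver-private members follow */
};

static void my_crtc_reset(struct drm_crtc *crtc)
{
	struct my_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	/* the helper accepts NULL if the allocation failed */
	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
}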
@@ -1206,124 +680,19 @@
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
- _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
-
return &cstate->base;
}
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- /* revert suspend actions, if necessary */
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, false);
-
- /* remove previous state, if present */
- if (crtc->state) {
- dpu_crtc_destroy_state(crtc, crtc->state);
- crtc->state = 0;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (!cstate) {
- DPU_ERROR("failed to allocate state\n");
- return;
- }
-
- _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
- &dpu_crtc->rp_head);
-
- cstate->base.crtc = crtc;
- crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
- struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc;
- struct drm_encoder *encoder;
- struct dpu_crtc_mixer *m;
- u32 i, misr_status;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
-
- mutex_lock(&dpu_crtc->crtc_lock);
-
- trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
- switch (event_type) {
- case DPU_POWER_EVENT_POST_ENABLE:
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
- !dpu_crtc->misr_enable)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, true,
- dpu_crtc->misr_frame_count);
- }
- break;
- case DPU_POWER_EVENT_PRE_DISABLE:
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
- !dpu_crtc->misr_enable)
- continue;
-
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- }
- break;
- case DPU_POWER_EVENT_POST_DISABLE:
- /**
- * Nothing to do. All the planes on the CRTC will be
- * programmed for every frame
- */
- break;
- default:
- DPU_DEBUG("event:%d not handled\n", event_type);
- break;
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
unsigned long flags;
+ bool release_bandwidth = false;
if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
DPU_ERROR("invalid crtc\n");
@@ -1336,13 +705,19 @@
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, true);
-
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
- mutex_lock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ release_bandwidth = true;
+ dpu_encoder_assign_crtc(encoder, NULL);
+ }
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -1351,50 +726,36 @@
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
- }
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
atomic_read(&dpu_crtc->frame_pending));
- dpu_core_perf_crtc_release_bw(crtc);
+ if (release_bandwidth)
+ dpu_core_perf_crtc_release_bw(crtc);
atomic_set(&dpu_crtc->frame_pending, 0);
}
dpu_core_perf_crtc_update(crtc, 0, true);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
- }
- if (dpu_crtc->power_event)
- dpu_power_handle_unregister_event(dpu_crtc->phandle,
- dpu_crtc->power_event);
-
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+ cstate->num_mixers = 0;
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
cstate->bw_split_vote = false;
- mutex_unlock(&dpu_crtc->crtc_lock);
-
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+
+ pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1403,7 +764,7 @@
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
+ bool request_bandwidth = false;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid crtc\n");
@@ -1411,38 +772,33 @@
}
priv = crtc->dev->dev_private;
+ pm_runtime_get_sync(crtc->dev->dev);
+
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ request_bandwidth = true;
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
}
- mutex_lock(&dpu_crtc->crtc_lock);
+ if (request_bandwidth)
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
- if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
- }
dpu_crtc->enabled = true;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
-
- dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle,
- DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
- DPU_POWER_EVENT_PRE_DISABLE,
- dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
}
struct plane_state {
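
A standalone model of the enable-side decision above: scan the attached encoders and take a KMS-wide bandwidth reference only if at least one runs in video mode. Note the flag must start out false (hence the initializer in the enable hunk), since no encoder may match; the disable path makes the symmetric check before releasing bandwidth. Types are illustrative only.

/* gcc -Wall bw_ref.c && ./a.out */
#include <stdbool.h>
#include <stdio.h>

enum intf_mode { INTF_MODE_CMD, INTF_MODE_VIDEO };

static bool crtc_wants_bandwidth_ref(const enum intf_mode *enc, int n)
{
	bool request_bandwidth = false;	/* stays false if no encoder matches */

	for (int i = 0; i < n; i++)
		if (enc[i] == INTF_MODE_VIDEO)
			request_bandwidth = true;

	return request_bandwidth;
}

int main(void)
{
	enum intf_mode video_enc[] = { INTF_MODE_VIDEO };
	enum intf_mode cmd_enc[]   = { INTF_MODE_CMD };

	printf("video-mode crtc takes ref: %d\n",
	       crtc_wants_bandwidth_ref(video_enc, 1));
	printf("command-mode crtc takes ref: %d\n",
	       crtc_wants_bandwidth_ref(cmd_enc, 1));
	return 0;
}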
@@ -1496,7 +852,7 @@
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1608,6 +964,8 @@
}
}
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
rc = dpu_core_perf_crtc_check(crtc, state);
if (rc) {
DPU_ERROR("crtc%d failed performance check %d\n",
@@ -1678,32 +1036,38 @@
}
end:
- _dpu_crtc_rp_free_unused(&cstate->rp);
kfree(pstates);
return rc;
}
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct dpu_crtc *dpu_crtc;
- int ret;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
-
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
- dpu_crtc->vblank_requested = en;
- mutex_unlock(&dpu_crtc->crtc_lock);
return 0;
}
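
A standalone model of the scheme described in the comment above: the crtc is "assigned" to its encoders at enable and cleared at disable, and vblank on/off is only forwarded to encoders whose assignment matches, independent of the atomic state's encoder_mask. Illustrative types only.

/* gcc -Wall vblank_assign.c && ./a.out */
#include <stdbool.h>
#include <stdio.h>

struct crtc { int id; };

struct encoder {
	int id;
	struct crtc *assigned_crtc;	/* set at enable, cleared at disable */
	bool vblank_on;
};

static void encoder_toggle_vblank_for_crtc(struct encoder *enc,
					   struct crtc *crtc, bool en)
{
	/* only react if this encoder is currently assigned to the crtc */
	if (enc->assigned_crtc == crtc)
		enc->vblank_on = en;
}

int main(void)
{
	struct crtc crtc0 = { .id = 0 };
	struct encoder encs[] = {
		{ .id = 0, .assigned_crtc = &crtc0 },
		{ .id = 1, .assigned_crtc = NULL },	/* not driven by crtc0 */
	};

	for (int i = 0; i < 2; i++)
		encoder_toggle_vblank_for_crtc(&encs[i], &crtc0, true);

	for (int i = 0; i < 2; i++)
		printf("encoder %d vblank_on=%d\n", encs[i].id, encs[i].vblank_on);
	return 0;
}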
@@ -1724,31 +1088,29 @@
int i, out_width;
- if (!s || !s->private)
- return -EINVAL;
-
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
+
+ drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
- mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
seq_puts(s, "\n");
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
+ for (i = 0; i < cstate->num_mixers; ++i) {
+ m = &cstate->mixers[i];
if (!m->hw_lm)
seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->hw_ctl)
+ else if (!m->lm_ctl)
seq_printf(s, "\tmixer[%d] has no ctl\n", i);
else
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
}
@@ -1818,9 +1180,7 @@
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
- mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_modeset_unlock_all(crtc->dev);
return 0;
}
@@ -1830,113 +1190,6 @@
return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
-static ssize_t _dpu_crtc_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- u32 frame_count, enable;
- size_t buff_copy;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy)) {
- DPU_ERROR("buffer copy failed\n");
- return -EINVAL;
- }
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- dpu_crtc->misr_enable = enable;
- dpu_crtc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- dpu_crtc->misr_data[i] = 0;
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
- }
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
-
- return count;
-}
-
-static ssize_t _dpu_crtc_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- u32 misr_status;
- ssize_t len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- if (!dpu_crtc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
- continue;
-
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
- m->hw_lm->idx - LM_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- dpu_crtc->misr_data[i]);
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
- return len;
-}
-
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
@@ -1954,34 +1207,14 @@
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dpu_crtc_res *res;
- struct dpu_crtc_respool *rp;
- int i;
seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- seq_printf(s, "bw_ctl[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
- dpu_crtc->cur_perf.bw_ctl[i]);
- seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
- dpu_crtc->cur_perf.max_per_pipe_ib[i]);
- }
-
- mutex_lock(&dpu_crtc->rp_lock);
- list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
- seq_printf(s, "rp.%d: ", rp->sequence_id);
- list_for_each_entry(res, &rp->res_list, list)
- seq_printf(s, "0x%x/0x%llx/%pK/%d ",
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- seq_puts(s, "\n");
- }
- mutex_unlock(&dpu_crtc->rp_lock);
+ seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "max_per_pipe_ib: %llu\n",
+ dpu_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
@@ -1989,8 +1222,7 @@
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
@@ -1998,26 +1230,10 @@
.llseek = seq_lseek,
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_crtc_misr_read,
- .write = _dpu_crtc_misr_setup,
- };
-
- if (!crtc)
- return -EINVAL;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms)
- return -EINVAL;
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
- if (!dpu_crtc->debugfs_root)
- return -ENOMEM;
- /* don't error check these */
debugfs_create_file("status", 0400,
dpu_crtc->debugfs_root,
dpu_crtc, &debugfs_status_fops);
@@ -2025,30 +1241,14 @@
dpu_crtc->debugfs_root,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
- debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
- dpu_crtc, &debugfs_misr_fops);
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return;
- dpu_crtc = to_dpu_crtc(crtc);
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -2058,7 +1258,9 @@
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
- _dpu_crtc_destroy_debugfs(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -2073,7 +1275,7 @@
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
- .disable = dpu_crtc_disable,
+ .atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
@@ -2081,7 +1283,8 @@
};
/* initialize crtc */
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
@@ -2099,13 +1302,9 @@
crtc = &dpu_crtc->base;
crtc->dev = dev;
- mutex_init(&dpu_crtc->crtc_lock);
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
- mutex_init(&dpu_crtc->rp_lock);
- INIT_LIST_HEAD(&dpu_crtc->rp_head);
-
init_completion(&dpu_crtc->frame_done_comp);
INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
@@ -2118,7 +1317,7 @@
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
@@ -2129,8 +1328,6 @@
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
- dpu_crtc->phandle = &kms->phandle;
-
DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index e87109e..5174e86 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _DPU_CRTC_H_
@@ -83,15 +72,13 @@
/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
- * @hw_ctl: CTL Path HW driver context
- * @encoder: Encoder attached to this lm & ctl
+ * @lm_ctl: CTL Path HW driver context
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
- struct dpu_hw_ctl *hw_ctl;
- struct drm_encoder *encoder;
+ struct dpu_hw_ctl *lm_ctl;
u32 mixer_op_mode;
u32 flush_mask;
};
@@ -121,11 +108,6 @@
* struct dpu_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
- * @num_ctls : Number of ctl paths in use
- * @num_mixers : Number of mixers in use
- * @mixers_swapped: Whether the mixers have been swapped for left/right update
- * especially in the case of DSC Merge.
- * @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
@@ -137,8 +119,6 @@
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@@ -147,7 +127,6 @@
* @dirty_list : list of color processing features are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
- * @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
@@ -156,27 +135,13 @@
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
- * @misr_enable : boolean entry indicates misr enable/disable status.
- * @misr_frame_count : misr frame count provided by client
- * @misr_data : store misr data before turning off the clocks.
* @phandle: Pointer to power handler
- * @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
- * @rp_lock : serialization lock for resource pool
- * @rp_head : list of active resource pool
- * @scl3_cfg_lut : qseed3 lut config
*/
struct dpu_crtc {
struct drm_crtc base;
char name[DPU_CRTC_NAME_SIZE];
- /* HW Resources reserved for the crtc */
- u32 num_ctls;
- u32 num_mixers;
- bool mixers_swapped;
- struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
- struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
-
struct drm_pending_vblank_event *event;
u32 vsync_count;
@@ -186,8 +151,6 @@
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- bool vblank_requested;
- bool suspend;
bool enabled;
struct list_head feature_list;
@@ -196,8 +159,6 @@
struct list_head ad_dirty;
struct list_head ad_active;
- struct mutex crtc_lock;
-
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
@@ -206,77 +167,17 @@
/* for handling internal event thread */
spinlock_t event_lock;
- bool misr_enable;
- u32 misr_frame_count;
- u32 misr_data[CRTC_DUAL_MIXERS];
-
- struct dpu_power_handle *phandle;
- struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
- struct mutex rp_lock;
- struct list_head rp_head;
-
struct dpu_crtc_smmu_state_data smmu_state;
};
#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
/**
- * struct dpu_crtc_res_ops - common operations for crtc resources
- * @get: get given resource
- * @put: put given resource
- */
-struct dpu_crtc_res_ops {
- void *(*get)(void *val, u32 type, u64 tag);
- void (*put)(void *val);
-};
-
-#define DPU_CRTC_RES_FLAG_FREE BIT(0)
-
-/**
- * struct dpu_crtc_res - definition of crtc resources
- * @list: list of crtc resource
- * @type: crtc resource type
- * @tag: unique identifier per type
- * @refcount: reference/usage count
- * @ops: callback operations
- * @val: resource handle associated with type/tag
- * @flags: customization flags
- */
-struct dpu_crtc_res {
- struct list_head list;
- u32 type;
- u64 tag;
- atomic_t refcount;
- struct dpu_crtc_res_ops ops;
- void *val;
- u32 flags;
-};
-
-/**
- * dpu_crtc_respool - crtc resource pool
- * @rp_lock: pointer to serialization lock
- * @rp_head: pointer to head of active resource pools of this crtc
- * @rp_list: list of crtc resource pool
- * @sequence_id: sequence identifier, incremented per state duplication
- * @res_list: list of resource managed by this resource pool
- * @ops: resource operations for parent resource pool
- */
-struct dpu_crtc_respool {
- struct mutex *rp_lock;
- struct list_head *rp_head;
- struct list_head rp_list;
- u32 sequence_id;
- struct list_head res_list;
- struct dpu_crtc_res_ops ops;
-};
-
-/**
* struct dpu_crtc_state - dpu container for atomic crtc state
* @base: Base drm crtc state structure
- * @is_ppsplit : Whether current topology requires PPSplit special handling
* @bw_control : true if bw/clk controlled by core bw/clk properties
* @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@@ -285,69 +186,40 @@
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @new_perf: new performance state being requested
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @num_ctls : Number of ctl paths in use
+ * @hw_ctls : List of active ctl paths
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
bool bw_control;
bool bw_split_vote;
-
- bool is_ppsplit;
struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
struct dpu_core_perf_params new_perf;
- struct dpu_crtc_respool rp;
+
+ /* HW Resources reserved for the crtc */
+ u32 num_mixers;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ u32 num_ctls;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_get_mixer_width - get the mixer width
- * Mixer width will be same as panel width(/2 for split)
- */
-static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
- u32 mixer_width;
-
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
- mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
-
- return mixer_width;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- return mode->vdisplay;
-}
-
-/**
* dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return -EINVAL;
-
- dpu_crtc = to_dpu_crtc(crtc);
- return atomic_read(&dpu_crtc->frame_pending);
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
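
As a minimal sketch (not part of this patch), commit-path code would now read the reserved mixer/ctl bookkeeping from dpu_crtc_state rather than from struct dpu_crtc; the helper name and the pr_debug output below are hypothetical:

static void example_log_ctl_paths(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int i;

	/* num_ctls and hw_ctls[] are per-state after this change */
	for (i = 0; i < cstate->num_ctls; i++)
		if (cstate->hw_ctls[i])
			pr_debug("crtc%d: ctl path %d active\n",
				 crtc->base.id, i);
}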
/**
@@ -358,6 +230,12 @@
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
*/
@@ -366,18 +244,18 @@
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
* @crtc: Pointer to drm crtc object
- * @old_state: Pointer to drm crtc old state object
*/
-void dpu_crtc_complete_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state);
+void dpu_crtc_complete_commit(struct drm_crtc *crtc);
/**
* dpu_crtc_init - create a new crtc object
* @dev: dpu device
* @plane: base plane
+ * @cursor: cursor plane
* @Return: new crtc object or error
*/
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor);
/**
* dpu_crtc_register_custom_event - api for enabling/disabling crtc event
@@ -402,22 +280,7 @@
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate =
- crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
- if (!cstate)
- return NRT_CLIENT;
-
- return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
- return crtc ? crtc->enabled : false;
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
#endif /* _DPU_CRTC_H_ */
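
A minimal caller sketch, assuming hypothetical primary/cursor plane pointers from the driver's KMS init path, showing the updated dpu_crtc_init() prototype that now takes an explicit cursor plane:

static int example_create_crtc(struct drm_device *dev,
			       struct drm_plane *primary,
			       struct drm_plane *cursor)
{
	struct drm_crtc *crtc;

	/* dpu_crtc_init() now pairs the primary plane with a cursor plane */
	crtc = dpu_crtc_init(dev, primary, cursor);
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	return 0;
}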
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644
index e741d26..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN 80
-
-#define DBGBUS_FLAGS_DSPP BIT(0)
-#define DBGBUS_DSPP_STATUS 0x34C
-
-#define DBGBUS_NAME_DPU "dpu"
-#define DBGBUS_NAME_VBIF_RT "vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0 0x188
-#define DBGBUS_AXI_INTF 0x194
-#define DBGBUS_SSPP1 0x298
-#define DBGBUS_DSPP 0x348
-#define DBGBUS_PERIPH 0x418
-
-#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON 0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
-#define MMSS_VBIF_TEST_BUS_OUT 0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR 0x190
-#define MMSS_VBIF_SRC_ERR 0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
-#define MMSS_VBIF_ERR_INFO 0X1a0
-#define MMSS_VBIF_ERR_INFO_1 0x1a4
-#define MMSS_VBIF_CLIENT_NUM 14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- * may sub-ranges: sub-ranges are used for dumping
- * or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len: buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
- struct list_head reg_base_head;
- char name[REG_BASE_NAME_LEN];
- void __iomem *base;
- size_t off;
- size_t cnt;
- size_t max_offset;
- char *buf;
- size_t buf_len;
- void (*cb)(void *ptr);
- void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
- u32 wr_addr;
- u32 block_id;
- u32 test_id;
- void (*analyzer)(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
- u32 disable_bus_addr;
- u32 block_bus_addr;
- u32 bit_offset;
- u32 block_cnt;
- u32 test_pnt_start;
- u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
- char *name;
- u32 enable_mask;
- bool include_in_deferred_work;
- u32 flags;
- u32 entries_size;
- u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct dpu_debug_bus_entry *entries;
- u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
- struct list_head reg_base_list;
- struct device *dev;
-
- struct work_struct dump_work;
-
- struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
- struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & 0xFFF000))
- return;
-
- dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
- { DBGBUS_SSPP0, 85, 2 },
-
- /* Upack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
- { DBGBUS_SSPP1, 85, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* cursor 1 */
- { DBGBUS_SSPP1, 80, 0 },
- { DBGBUS_SSPP1, 80, 1 },
- { DBGBUS_SSPP1, 80, 2 },
- { DBGBUS_SSPP1, 80, 3 },
- { DBGBUS_SSPP1, 80, 4 },
- { DBGBUS_SSPP1, 80, 5 },
- { DBGBUS_SSPP1, 80, 6 },
- { DBGBUS_SSPP1, 80, 7 },
-
- { DBGBUS_SSPP1, 81, 0 },
- { DBGBUS_SSPP1, 81, 1 },
- { DBGBUS_SSPP1, 81, 2 },
- { DBGBUS_SSPP1, 81, 3 },
- { DBGBUS_SSPP1, 81, 4 },
- { DBGBUS_SSPP1, 81, 5 },
- { DBGBUS_SSPP1, 81, 6 },
- { DBGBUS_SSPP1, 81, 7 },
-
- { DBGBUS_SSPP1, 82, 0 },
- { DBGBUS_SSPP1, 82, 1 },
- { DBGBUS_SSPP1, 82, 2 },
- { DBGBUS_SSPP1, 82, 3 },
- { DBGBUS_SSPP1, 82, 4 },
- { DBGBUS_SSPP1, 82, 5 },
- { DBGBUS_SSPP1, 82, 6 },
- { DBGBUS_SSPP1, 82, 7 },
-
- { DBGBUS_SSPP1, 83, 0 },
- { DBGBUS_SSPP1, 83, 1 },
- { DBGBUS_SSPP1, 83, 2 },
- { DBGBUS_SSPP1, 83, 3 },
- { DBGBUS_SSPP1, 83, 4 },
- { DBGBUS_SSPP1, 83, 5 },
- { DBGBUS_SSPP1, 83, 6 },
- { DBGBUS_SSPP1, 83, 7 },
-
- { DBGBUS_SSPP1, 84, 0 },
- { DBGBUS_SSPP1, 84, 1 },
- { DBGBUS_SSPP1, 84, 2 },
- { DBGBUS_SSPP1, 84, 3 },
- { DBGBUS_SSPP1, 84, 4 },
- { DBGBUS_SSPP1, 84, 5 },
- { DBGBUS_SSPP1, 84, 6 },
- { DBGBUS_SSPP1, 84, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 0},
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 0},
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 0},
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 0},
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 0},
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 0},
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 0},
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 0},
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 0},
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 0},
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 0},
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 0},
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 0},
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 0},
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 9, 0},
- { DBGBUS_SSPP0, 9, 1},
- { DBGBUS_SSPP0, 9, 3},
- { DBGBUS_SSPP0, 29, 0},
- { DBGBUS_SSPP0, 29, 1},
- { DBGBUS_SSPP0, 29, 3},
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 9, 0},
- { DBGBUS_SSPP1, 9, 1},
- { DBGBUS_SSPP1, 9, 3},
- { DBGBUS_SSPP1, 29, 0},
- { DBGBUS_SSPP1, 29, 1},
- { DBGBUS_SSPP1, 29, 3},
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- { DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
-
- /* Upack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 84, 1},
- { DBGBUS_DSPP, 84, 2},
- { DBGBUS_DSPP, 84, 3},
- { DBGBUS_DSPP, 84, 4},
- { DBGBUS_DSPP, 84, 5},
- { DBGBUS_DSPP, 84, 6},
- { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 85, 1},
- { DBGBUS_DSPP, 85, 2},
- { DBGBUS_DSPP, 85, 3},
- { DBGBUS_DSPP, 85, 4},
- { DBGBUS_DSPP, 85, 5},
- { DBGBUS_DSPP, 85, 6},
- { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 86, 1},
- { DBGBUS_DSPP, 86, 2},
- { DBGBUS_DSPP, 86, 3},
- { DBGBUS_DSPP, 86, 4},
- { DBGBUS_DSPP, 86, 5},
- { DBGBUS_DSPP, 86, 6},
- { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 87, 1},
- { DBGBUS_DSPP, 87, 2},
- { DBGBUS_DSPP, 87, 3},
- { DBGBUS_DSPP, 87, 4},
- { DBGBUS_DSPP, 87, 5},
- { DBGBUS_DSPP, 87, 6},
- { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 88, 1},
- { DBGBUS_DSPP, 88, 2},
- { DBGBUS_DSPP, 88, 3},
- { DBGBUS_DSPP, 88, 4},
- { DBGBUS_DSPP, 88, 5},
- { DBGBUS_DSPP, 88, 6},
- { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 89, 1},
- { DBGBUS_DSPP, 89, 2},
- { DBGBUS_DSPP, 89, 3},
- { DBGBUS_DSPP, 89, 4},
- { DBGBUS_DSPP, 89, 5},
- { DBGBUS_DSPP, 89, 6},
- { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 90, 1},
- { DBGBUS_DSPP, 90, 2},
- { DBGBUS_DSPP, 90, 3},
- { DBGBUS_DSPP, 90, 4},
- { DBGBUS_DSPP, 90, 5},
- { DBGBUS_DSPP, 90, 6},
- { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 91, 1},
- { DBGBUS_DSPP, 91, 2},
- { DBGBUS_DSPP, 91, 3},
- { DBGBUS_DSPP, 91, 4},
- { DBGBUS_DSPP, 91, 5},
- { DBGBUS_DSPP, 91, 6},
- { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 92, 1},
- { DBGBUS_DSPP, 92, 2},
- { DBGBUS_DSPP, 92, 3},
- { DBGBUS_DSPP, 92, 4},
- { DBGBUS_DSPP, 92, 5},
- { DBGBUS_DSPP, 92, 6},
- { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 93, 1},
- { DBGBUS_DSPP, 93, 2},
- { DBGBUS_DSPP, 93, 3},
- { DBGBUS_DSPP, 93, 4},
- { DBGBUS_DSPP, 93, 5},
- { DBGBUS_DSPP, 93, 6},
- { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 94, 1},
- { DBGBUS_DSPP, 94, 2},
- { DBGBUS_DSPP, 94, 3},
- { DBGBUS_DSPP, 94, 4},
- { DBGBUS_DSPP, 94, 5},
- { DBGBUS_DSPP, 94, 6},
- { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 95, 1},
- { DBGBUS_DSPP, 95, 2},
- { DBGBUS_DSPP, 95, 3},
- { DBGBUS_DSPP, 95, 4},
- { DBGBUS_DSPP, 95, 5},
- { DBGBUS_DSPP, 95, 6},
- { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
- /* LM5 */
- { DBGBUS_DSPP, 110, 1},
- { DBGBUS_DSPP, 110, 2},
- { DBGBUS_DSPP, 110, 3},
- { DBGBUS_DSPP, 110, 4},
- { DBGBUS_DSPP, 110, 5},
- { DBGBUS_DSPP, 110, 6},
- { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 111, 1},
- { DBGBUS_DSPP, 111, 2},
- { DBGBUS_DSPP, 111, 3},
- { DBGBUS_DSPP, 111, 4},
- { DBGBUS_DSPP, 111, 5},
- { DBGBUS_DSPP, 111, 6},
- { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 112, 1},
- { DBGBUS_DSPP, 112, 2},
- { DBGBUS_DSPP, 112, 3},
- { DBGBUS_DSPP, 112, 4},
- { DBGBUS_DSPP, 112, 5},
- { DBGBUS_DSPP, 112, 6},
- { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 113, 1},
- { DBGBUS_DSPP, 113, 2},
- { DBGBUS_DSPP, 113, 3},
- { DBGBUS_DSPP, 113, 4},
- { DBGBUS_DSPP, 113, 5},
- { DBGBUS_DSPP, 113, 6},
- { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 114, 1},
- { DBGBUS_DSPP, 114, 2},
- { DBGBUS_DSPP, 114, 3},
- { DBGBUS_DSPP, 114, 4},
- { DBGBUS_DSPP, 114, 5},
- { DBGBUS_DSPP, 114, 6},
- { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 115, 1},
- { DBGBUS_DSPP, 115, 2},
- { DBGBUS_DSPP, 115, 3},
- { DBGBUS_DSPP, 115, 4},
- { DBGBUS_DSPP, 115, 5},
- { DBGBUS_DSPP, 115, 6},
- { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 116, 1},
- { DBGBUS_DSPP, 116, 2},
- { DBGBUS_DSPP, 116, 3},
- { DBGBUS_DSPP, 116, 4},
- { DBGBUS_DSPP, 116, 5},
- { DBGBUS_DSPP, 116, 6},
- { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 117, 1},
- { DBGBUS_DSPP, 117, 2},
- { DBGBUS_DSPP, 117, 3},
- { DBGBUS_DSPP, 117, 4},
- { DBGBUS_DSPP, 117, 5},
- { DBGBUS_DSPP, 117, 6},
- { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 118, 1},
- { DBGBUS_DSPP, 118, 2},
- { DBGBUS_DSPP, 118, 3},
- { DBGBUS_DSPP, 118, 4},
- { DBGBUS_DSPP, 118, 5},
- { DBGBUS_DSPP, 118, 6},
- { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 119, 1},
- { DBGBUS_DSPP, 119, 2},
- { DBGBUS_DSPP, 119, 3},
- { DBGBUS_DSPP, 119, 4},
- { DBGBUS_DSPP, 119, 5},
- { DBGBUS_DSPP, 119, 6},
- { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 120, 1},
- { DBGBUS_DSPP, 120, 2},
- { DBGBUS_DSPP, 120, 3},
- { DBGBUS_DSPP, 120, 4},
- { DBGBUS_DSPP, 120, 5},
- { DBGBUS_DSPP, 120, 6},
- { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- /* intf0-3 */
- { DBGBUS_PERIPH, 0, 0},
- { DBGBUS_PERIPH, 1, 0},
- { DBGBUS_PERIPH, 2, 0},
- { DBGBUS_PERIPH, 3, 0},
-
- /* te counter wrapper */
- { DBGBUS_PERIPH, 60, 0},
-
- /* dsc0 */
- { DBGBUS_PERIPH, 47, 0},
- { DBGBUS_PERIPH, 47, 1},
- { DBGBUS_PERIPH, 47, 2},
- { DBGBUS_PERIPH, 47, 3},
- { DBGBUS_PERIPH, 47, 4},
- { DBGBUS_PERIPH, 47, 5},
- { DBGBUS_PERIPH, 47, 6},
- { DBGBUS_PERIPH, 47, 7},
-
- /* dsc1 */
- { DBGBUS_PERIPH, 48, 0},
- { DBGBUS_PERIPH, 48, 1},
- { DBGBUS_PERIPH, 48, 2},
- { DBGBUS_PERIPH, 48, 3},
- { DBGBUS_PERIPH, 48, 4},
- { DBGBUS_PERIPH, 48, 5},
- { DBGBUS_PERIPH, 48, 6},
- { DBGBUS_PERIPH, 48, 7},
-
- /* dsc2 */
- { DBGBUS_PERIPH, 51, 0},
- { DBGBUS_PERIPH, 51, 1},
- { DBGBUS_PERIPH, 51, 2},
- { DBGBUS_PERIPH, 51, 3},
- { DBGBUS_PERIPH, 51, 4},
- { DBGBUS_PERIPH, 51, 5},
- { DBGBUS_PERIPH, 51, 6},
- { DBGBUS_PERIPH, 51, 7},
-
- /* dsc3 */
- { DBGBUS_PERIPH, 52, 0},
- { DBGBUS_PERIPH, 52, 1},
- { DBGBUS_PERIPH, 52, 2},
- { DBGBUS_PERIPH, 52, 3},
- { DBGBUS_PERIPH, 52, 4},
- { DBGBUS_PERIPH, 52, 5},
- { DBGBUS_PERIPH, 52, 6},
- { DBGBUS_PERIPH, 52, 7},
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* cdwn */
- { DBGBUS_PERIPH, 80, 0},
- { DBGBUS_PERIPH, 80, 1},
- { DBGBUS_PERIPH, 80, 2},
-
- { DBGBUS_PERIPH, 81, 0},
- { DBGBUS_PERIPH, 81, 1},
- { DBGBUS_PERIPH, 81, 2},
-
- { DBGBUS_PERIPH, 82, 0},
- { DBGBUS_PERIPH, 82, 1},
- { DBGBUS_PERIPH, 82, 2},
- { DBGBUS_PERIPH, 82, 3},
- { DBGBUS_PERIPH, 82, 4},
- { DBGBUS_PERIPH, 82, 5},
- { DBGBUS_PERIPH, 82, 6},
- { DBGBUS_PERIPH, 82, 7},
-
- /* hdmi */
- { DBGBUS_PERIPH, 68, 0},
- { DBGBUS_PERIPH, 68, 1},
- { DBGBUS_PERIPH, 68, 2},
- { DBGBUS_PERIPH, 68, 3},
- { DBGBUS_PERIPH, 68, 4},
- { DBGBUS_PERIPH, 68, 5},
-
- /* edp */
- { DBGBUS_PERIPH, 69, 0},
- { DBGBUS_PERIPH, 69, 1},
- { DBGBUS_PERIPH, 69, 2},
- { DBGBUS_PERIPH, 69, 3},
- { DBGBUS_PERIPH, 69, 4},
- { DBGBUS_PERIPH, 69, 5},
-
- /* dsi0 */
- { DBGBUS_PERIPH, 70, 0},
- { DBGBUS_PERIPH, 70, 1},
- { DBGBUS_PERIPH, 70, 2},
- { DBGBUS_PERIPH, 70, 3},
- { DBGBUS_PERIPH, 70, 4},
- { DBGBUS_PERIPH, 70, 5},
-
- /* dsi1 */
- { DBGBUS_PERIPH, 71, 0},
- { DBGBUS_PERIPH, 71, 1},
- { DBGBUS_PERIPH, 71, 2},
- { DBGBUS_PERIPH, 71, 3},
- { DBGBUS_PERIPH, 71, 4},
- { DBGBUS_PERIPH, 71, 5},
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
- {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
- {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
- {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - use callback to turn power on for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
- if (enable)
- pm_runtime_get_sync(dpu_dbg_base.dev);
- else
- pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 status = 0;
- struct dpu_debug_bus_entry *head;
- dma_addr_t dma = 0;
- int list_size;
- int i;
- u32 offset;
- void __iomem *mem_base = NULL;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base + bus->top_blk_off;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dump_mem = &bus->cmn.dumped_content;
-
- /* will keep in memory 4 entries of 4 bytes each */
- list_size = (bus->cmn.entries_size * 4 * 4);
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &dma, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
- pr_err("dump_mem: allocation fails\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
- for (i = 0; i < bus->cmn.entries_size; i++) {
- head = bus->entries + i;
- writel_relaxed(TEST_MASK(head->block_id, head->test_id),
- mem_base + head->wr_addr);
- wmb(); /* make sure test bits were written */
-
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
- offset = DBGBUS_DSPP_STATUS;
- /* keep DSPP test point enabled */
- if (head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
- } else {
- offset = head->wr_addr + 0x4;
- }
-
- status = readl_relaxed(mem_base + offset);
-
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "waddr=0x%x blk=%d tst=%d val=0x%x\n",
- head->wr_addr, head->block_id,
- head->test_id, status);
-
- if (dump_addr && in_mem) {
- dump_addr[i*4] = head->wr_addr;
- dump_addr[i*4 + 1] = head->block_id;
- dump_addr[i*4 + 2] = head->test_id;
- dump_addr[i*4 + 3] = status;
- }
-
- if (head->analyzer)
- head->analyzer(mem_base, head, status);
-
- /* Disable debug bus once we are done */
- writel_relaxed(0, mem_base + head->wr_addr);
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
- head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
- }
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
- struct vbif_debug_bus_entry *head, void __iomem *mem_base,
- u32 *dump_addr, bool in_log)
-{
- int i, j;
- u32 val;
-
- if (!dump_addr && !in_log)
- return;
-
- for (i = 0; i < head->block_cnt; i++) {
- writel_relaxed(1 << (i + head->bit_offset),
- mem_base + head->block_bus_addr);
- /* make sure that the current bus block is enabled */
- wmb();
- for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
- writel_relaxed(j, mem_base + head->block_bus_addr + 4);
- /* make sure that test point is enabled */
- wmb();
- val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
- if (dump_addr) {
- *dump_addr++ = head->block_bus_addr;
- *dump_addr++ = i;
- *dump_addr++ = j;
- *dump_addr++ = val;
- }
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
- head->block_bus_addr, i, j, val);
- }
- }
-}
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 value, d0, d1;
- unsigned long reg, reg1, reg2;
- struct vbif_debug_bus_entry *head;
- dma_addr_t dma = 0;
- int i, list_size = 0;
- void __iomem *mem_base = NULL;
- struct vbif_debug_bus_entry *dbg_bus;
- u32 bus_size;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dbg_bus = bus->entries;
- bus_size = bus->cmn.entries_size;
- list_size = bus->cmn.entries_size;
- dump_mem = &bus->cmn.dumped_content;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (!dump_mem || !dbg_bus || !bus_size || !list_size)
- return;
-
- /* allocate memory for each test point */
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
- list_size += (head->block_cnt * head->test_pnt_cnt);
- }
-
- /* 4 bytes * 4 entries for each test point */
- list_size *= 16;
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &dma, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
- pr_err("dump_mem: allocation fails\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
-
- value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
- writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
- /* make sure that vbif core is on */
- wmb();
-
- /**
- * Extract VBIF error info based on XIN halt and error status.
- * If the XIN client is not in HALT state, or an error is detected,
- * then retrieve the VBIF error info for it.
- */
- reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
- reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
- reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
- dev_err(dpu_dbg_base.dev,
- "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
- reg, reg1, reg2);
- reg >>= 16;
- reg &= ~(reg1 | reg2);
- for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
- if (!test_bit(0, &reg)) {
- writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
- /* make sure reg write goes through */
- wmb();
-
- d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
- d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
- dev_err(dpu_dbg_base.dev,
- "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
- i, d0, d1);
- }
- reg >>= 1;
- }
-
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
-
- writel_relaxed(0, mem_base + head->disable_bus_addr);
- writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
- /* make sure that other bus is off */
- wmb();
-
- _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
- in_log);
- if (dump_addr)
- dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
- }
-
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump array of register bases
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (dump_dbgbus_dpu)
- _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
- if (dump_dbgbus_vbif_rt)
- _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
- _dpu_dump_array("dpudump_workitem",
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (queue_work && work_pending(&dpu_dbg_base.dump_work))
- return;
-
- if (!queue_work) {
- _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
- return;
- }
-
- /* schedule work to dump later */
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
- dump_dbgbus_vbif_rt;
- schedule_work(&dpu_dbg_base.dump_work);
-}
-
-/*
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
- /* non-seekable */
- file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- file->private_data = inode->i_private;
- return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- _dpu_dump_array("dump_debugfs", true, true);
- return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
- .open = dpu_dbg_debugfs_open,
- .write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
- char debug_name[80] = "";
-
- if (!debugfs_root)
- return -EINVAL;
-
- debugfs_create_file("dump", 0600, debugfs_root, NULL,
- &dpu_dbg_dump_fops);
-
- if (dbg->dbgbus_dpu.entries) {
- dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_dpu.cmn.name);
- dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_dpu.cmn.enable_mask);
- }
-
- if (dbg->dbgbus_vbif_rt.entries) {
- dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_vbif_rt.cmn.name);
- dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_vbif_rt.cmn.enable_mask);
- }
-
- return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
- memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
- memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
- if (IS_MSM8998_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
- dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
- dbg->dbgbus_dpu.cmn.entries_size =
- ARRAY_SIZE(dbg_bus_dpu_sdm845);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- /* vbif is unchanged vs 8998 */
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else {
- pr_err("unsupported chipset id %X\n", hwversion);
- }
-}
-
-int dpu_dbg_init(struct device *dev)
-{
- if (!dev) {
- pr_err("invalid params\n");
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
- dpu_dbg_base.dev = dev;
-
- INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
- return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
- _dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
- dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
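For reference, each record that the removed dpu debug-bus dumper wrote into its DMA buffer was four 32-bit words: the test-point write address, the block id, the test id, and the status value read back. A minimal illustrative layout, assuming the struct name exists only for documentation:

/* Illustrative only: one dumped debug-bus record (4 x u32), matching how
 * the removed _dpu_dbg_dump_dpu_dbg_bus() filled dump_addr[i*4 .. i*4+3].
 */
struct dpu_dbg_bus_record {
	u32 wr_addr;	/* register offset used to select the test point */
	u32 block_id;	/* debug block id programmed via TEST_MASK() */
	u32 test_id;	/* test point index within the block */
	u32 status;	/* value read back from the status register */
};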
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644
index 1e6fa94..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
- DPU_DBG_DUMP_IN_LOG = BIT(0),
- DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion: Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev: device handle
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root: debugfs root in which to create dpu debug entries
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns: none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work: whether to queue the dumping work to the work_struct
- * @name: string indicating origin of dump
- * @dump_dbgbus: dump the dpu debug bus
- * @dump_vbif_rt: dump the vbif rt bus
- * Returns: none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt);
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- * address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
- return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
- bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
-#endif /* DPU_DBG_H_ */
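With dpu_dbg.h deleted, the init/register/dump entry points above are gone entirely. For context, a caller of the removed interface would have looked roughly like this sketch (hypothetical function, error handling trimmed, based only on the declarations deleted above):

/* Hypothetical sketch of how the removed dpu_dbg API was driven. */
static int example_dpu_dbg_setup(struct device *dev,
				 struct dentry *debugfs_root, u32 hwversion)
{
	int ret;

	ret = dpu_dbg_init(dev);
	if (ret)
		return ret;

	dpu_dbg_init_dbg_buses(hwversion);

	ret = dpu_dbg_debugfs_register(debugfs_root);
	if (ret)
		return ret;

	/* dump both the DPU and VBIF RT debug buses, synchronously */
	dpu_dbg_dump(false, __func__, true, true);
	return 0;
}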
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index ec3fd67..d82ea99 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1,30 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-#include <linux/kthread.h>
#include <linux/debugfs.h>
+#include <linux/kthread.h>
#include <linux/seq_file.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+
#include "msm_drv.h"
#include "dpu_kms.h"
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
@@ -65,12 +56,13 @@
#define MAX_CHANNELS_PER_ENC 2
-#define MISR_BUFF_SIZE 256
-
#define IDLE_SHORT_TIMEOUT 1
#define MAX_VDISPLAY_SPLIT 1080
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
/**
* enum dpu_enc_rc_events - events for resource control state machine
* @DPU_ENC_RC_EVENT_KICKOFF:
@@ -132,8 +124,9 @@
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
- * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
+ * @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@@ -143,26 +136,26 @@
* @intfs_swapped Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb: Callback into the upper layer / CRTC for
- * notification of the VBLANK
- * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
- * @enc_lock: Lock around physical encoder create/destroy and
- access.
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
- * @misr_enable: misr enable/disable status
- * @misr_frame_count: misr frame count before start capturing the data
 * @idle_pc_supported: indicate if idle power collapse is supported
* @rc_lock: resource control mutex lock to protect
* virt encoder over various state changes
@@ -179,17 +172,17 @@
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
- uint32_t display_num_of_h_tiles;
+ bool enabled;
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
+ struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
bool intfs_swapped;
- void (*crtc_vblank_cb)(void *);
- void *crtc_vblank_cb_data;
+ struct drm_crtc *crtc;
struct dentry *debugfs_root;
struct mutex enc_lock;
@@ -197,18 +190,16 @@
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- atomic_t frame_done_timeout;
+ atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
struct timer_list vsync_event_timer;
struct msm_display_info disp_info;
- bool misr_enable;
- u32 misr_frame_count;
bool idle_pc_supported;
struct mutex rc_lock;
enum dpu_enc_rc_states rc_state;
- struct kthread_delayed_work delayed_off_work;
+ struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
bool mode_set_complete;
@@ -217,39 +208,6 @@
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
- bool enable)
-{
- struct drm_encoder *drm_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu enc\n");
- return -EINVAL;
- }
-
- drm_enc = &dpu_enc->base;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
- DPU_ERROR("drm device invalid\n");
- return -EINVAL;
- }
-
- priv = drm_enc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
@@ -443,30 +401,22 @@
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
- if (!hw_res || !drm_enc || !conn_state) {
- DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
- drm_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
- hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.get_hw_resources)
- phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -523,7 +473,7 @@
hw_mdptop = phys_enc->hw_mdptop;
disp_info = &dpu_enc->disp_info;
- if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
return;
/**
@@ -564,8 +514,8 @@
list_for_each_entry(cur_mode, &connector->modes, head) {
if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- cur_mode->vrefresh == adj_mode->vrefresh) {
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
@@ -658,14 +608,11 @@
if (drm_atomic_crtc_needs_modeset(crtc_state)
&& dpu_enc->mode_set_complete) {
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- conn_state, topology, true);
+ topology, true);
dpu_enc->mode_set_complete = false;
}
}
- if (!ret)
- drm_mode_set_crtcinfo(adj_mode, 0);
-
trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
adj_mode->private_flags);
@@ -786,7 +733,6 @@
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
- struct msm_drm_thread *disp_thread;
bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
@@ -799,12 +745,6 @@
is_vid_mode = dpu_enc->disp_info.capabilities &
MSM_DISPLAY_CAP_VID_MODE;
- if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
- DPU_ERROR("invalid crtc index\n");
- return -EINVAL;
- }
- disp_thread = &priv->disp_thread[drm_enc->crtc->index];
-
/*
* when idle_pc is not supported, process only KICKOFF, STOP and MODESET
* events and return early for other events (ie wb display).
@@ -821,8 +761,7 @@
switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -881,10 +820,8 @@
return 0;
}
- kthread_queue_delayed_work(
- &disp_thread->worker,
- &dpu_enc->delayed_off_work,
- msecs_to_jiffies(dpu_enc->idle_timeout));
+ queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
@@ -893,8 +830,7 @@
case DPU_ENC_RC_EVENT_PRE_STOP:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1014,10 +950,14 @@
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter;
+ struct drm_crtc *drm_crtc;
+ struct dpu_crtc_state *cstate;
+ struct dpu_rm_hw_iter hw_iter;
struct msm_display_topology topology;
- enum dpu_rm_topology_name topology_name;
- int i = 0, ret;
+ struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
+ struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm = 0, num_ctl = 0;
+ int i, j, ret;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -1045,44 +985,105 @@
return;
}
+ drm_for_each_crtc(drm_crtc, drm_enc->dev)
+ if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
+ break;
+
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
- conn->state, topology, false);
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
+ topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"failed to reserve hw resources, %d\n", ret);
return;
}
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
}
- topology_name = dpu_rm_get_topology_name(topology);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
+ num_ctl++;
+ }
+
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
+ num_lm++;
+ }
+
+ cstate = to_dpu_crtc_state(drm_crtc->state);
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = hw_lm[i];
+ cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ }
+
+ cstate->num_mixers = num_lm;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys) {
if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc,
- "invalid pingpong block for the encoder\n");
- return;
+ DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
+ "at idx: %d\n", i);
+ goto error;
}
+
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
+ "at idx: %d\n", i);
+ goto error;
+ }
+
phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
+
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
+
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
+
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n",
+ i);
+ goto error;
+ }
+
phys->connector = conn->state->connector;
- phys->topology_name = topology_name;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
}
dpu_enc->mode_set_complete = true;
+
+error:
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1109,12 +1110,6 @@
return;
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
- dpu_enc->cur_master->hw_mdptop &&
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
- dpu_enc->cur_master->hw_mdptop);
-
if (dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
@@ -1124,34 +1119,30 @@
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
}
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
- dpu_enc = to_dpu_encoder_virt(drm_enc);
+ mutex_lock(&dpu_enc->enc_lock);
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!dpu_enc->enabled)
+ goto out;
- if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
- phys->ops.restore(phys);
- }
-
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i, ret = 0;
+ int ret = 0;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@@ -1159,55 +1150,33 @@
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
- dpu_enc->cur_master = NULL;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ /* always enable slave encoder before master */
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
+ dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
- if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
- DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
- dpu_enc->cur_master = phys;
- break;
- }
- }
-
- if (!dpu_enc->cur_master) {
- DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
- return;
- }
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
- return;
+ goto out;
}
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys)
- continue;
-
- if (phys != dpu_enc->cur_master) {
- if (phys->ops.enable)
- phys->ops.enable(phys);
- }
-
- if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
- MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
- phys->ops.setup_misr(phys, true,
- dpu_enc->misr_frame_count);
- }
-
- if (dpu_enc->cur_master->ops.enable)
- dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
-
_dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1229,11 +1198,14 @@
return;
}
- mode = &drm_enc->crtc->state->adjusted_mode;
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1252,7 +1224,7 @@
}
/* after phys waits for frame-done, should be no more frames pending */
- if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&dpu_enc->frame_done_timer);
}
@@ -1264,11 +1236,11 @@
dpu_enc->phys_encs[i]->connector = NULL;
}
- dpu_enc->cur_master = NULL;
-
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+ mutex_unlock(&dpu_enc->enc_lock);
}
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1299,8 +1271,8 @@
dpu_enc = to_dpu_encoder_virt(drm_enc);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- if (dpu_enc->crtc_vblank_cb)
- dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
@@ -1320,25 +1292,32 @@
DPU_ATRACE_END("encoder_underrun_callback");
}
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
- void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
- bool enable;
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
int i;
- enable = vbl_cb ? true : false;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- dpu_enc->crtc_vblank_cb = vbl_cb;
- dpu_enc->crtc_vblank_cb_data = vbl_data;
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1395,14 +1374,14 @@
/* One of the physical encoders has become idle */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
- clear_bit(i, dpu_enc->frame_busy_mask);
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
+ clear_bit(i, dpu_enc->frame_busy_mask);
}
}
if (!dpu_enc->frame_busy_mask[0]) {
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
del_timer(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
@@ -1420,7 +1399,7 @@
}
}
-static void dpu_encoder_off_work(struct kthread_work *work)
+static void dpu_encoder_off_work(struct work_struct *work)
{
struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work);
@@ -1443,19 +1422,13 @@
* phys: Pointer to physical encoder structure
* extra_flush_bits: Additional bit mask to include in flush trigger
*/
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
u32 ret = UINT_MAX;
- if (!drm_enc || !phys) {
- DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
- drm_enc != 0, phys != 0);
- return;
- }
-
if (!phys->hw_pp) {
DPU_ERROR("invalid pingpong hw\n");
return;
@@ -1478,14 +1451,15 @@
ret = ctl->ops.get_pending_flush(ctl);
trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
- pending_kickoff_cnt, ctl->idx, ret);
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
}
/**
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* phys: Pointer to physical encoder structure
*/
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1542,7 +1516,7 @@
return rc;
}
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
@@ -1562,10 +1536,8 @@
ctl->idx);
rc = ctl->ops.reset(ctl);
- if (rc) {
+ if (rc)
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
- dpu_dbg_dump(false, __func__, true, true);
- }
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -1585,11 +1557,6 @@
uint32_t i, pending_flush;
unsigned long lock_flags;
- if (!dpu_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
pending_flush = 0x0;
/* update pending counts and trigger kickoff ctl flush atomically */
@@ -1606,8 +1573,14 @@
if (!ctl)
continue;
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
if (phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
+
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
@@ -1707,8 +1680,7 @@
return line_time;
}
-static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
- ktime_t *wakeup_time)
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
struct drm_display_mode *mode;
struct dpu_encoder_virt *dpu_enc;
@@ -1795,7 +1767,7 @@
return;
}
- if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
return;
trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
@@ -1803,18 +1775,13 @@
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params)
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
- if (!drm_enc || !params) {
- DPU_ERROR("invalid args\n");
- return;
- }
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
@@ -1825,7 +1792,7 @@
phys = dpu_enc->phys_encs[i];
if (phys) {
if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys, params);
+ phys->ops.prepare_for_kickoff(phys);
if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
}
@@ -1838,9 +1805,7 @@
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.hw_reset)
- phys->ops.hw_reset(phys);
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
}
@@ -1850,22 +1815,20 @@
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
ktime_t wakeup_time;
+ unsigned long timeout_ms;
unsigned int i;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
DPU_ATRACE_BEGIN("encoder_kickoff");
dpu_enc = to_dpu_encoder_virt(drm_enc);
trace_dpu_enc_kickoff(DRMID(drm_enc));
- atomic_set(&dpu_enc->frame_done_timeout,
- DPU_FRAME_DONE_TIMEOUT * 1000 /
- drm_enc->crtc->state->adjusted_mode.vrefresh);
- mod_timer(&dpu_enc->frame_done_timer, jiffies +
- ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
@@ -1877,8 +1840,8 @@
phys->ops.handle_post_kickoff(phys);
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
- !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+ !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
ktime_to_ms(wakeup_time));
mod_timer(&dpu_enc->vsync_event_timer,
@@ -1910,14 +1873,9 @@
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = s->private;
int i;
- if (!s || !s->private)
- return -EINVAL;
-
- dpu_enc = s->private;
-
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1953,116 +1911,9 @@
return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
-static ssize_t _dpu_encoder_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- size_t buff_copy;
- u32 frame_count, enable;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy))
- return -EINVAL;
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- dpu_enc->misr_enable = enable;
- dpu_enc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.setup_misr)
- continue;
-
- phys->ops.setup_misr(phys, enable, frame_count);
- }
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
-
- return count;
-}
-
-static ssize_t _dpu_encoder_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
- int rc;
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- if (!dpu_enc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- } else if (dpu_enc->disp_info.capabilities &
- ~MSM_DISPLAY_CAP_VID_MODE) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "unsupported\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.collect_misr)
- continue;
-
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "Intf idx:%d\n", phys->intf_idx - INTF_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- phys->ops.collect_misr(phys));
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
- return len;
-}
-
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
@@ -2074,20 +1925,13 @@
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_encoder_misr_read,
- .write = _dpu_encoder_misr_setup,
- };
-
char name[DPU_NAME_SIZE];
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -2096,16 +1940,11 @@
/* create overall sub-directory for the encoder */
dpu_enc->debugfs_root = debugfs_create_dir(name,
drm_enc->dev->primary->debugfs_root);
- if (!dpu_enc->debugfs_root)
- return -ENOMEM;
/* don't error check these */
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
- debugfs_create_file("misr_data", 0600,
- dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
-
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i] &&
dpu_enc->phys_encs[i]->ops.late_register)
@@ -2115,26 +1954,11 @@
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc;
-
- if (!drm_enc)
- return;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -2144,7 +1968,9 @@
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
- _dpu_encoder_destroy_debugfs(encoder);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
@@ -2193,6 +2019,11 @@
++dpu_enc->num_phys_encs;
}
+ if (params->split_role == ENC_ROLE_SLAVE)
+ dpu_enc->cur_slave = enc;
+ else
+ dpu_enc->cur_master = enc;
+
return 0;
}
@@ -2204,8 +2035,7 @@
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
- struct msm_display_info *disp_info,
- int *drm_enc_mode)
+ struct msm_display_info *disp_info)
{
int ret = 0;
int i = 0;
@@ -2218,6 +2048,8 @@
return -EINVAL;
}
+ dpu_enc->cur_master = NULL;
+
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
@@ -2226,24 +2058,17 @@
DPU_DEBUG("\n");
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
- *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ switch (disp_info->intf_type) {
+ case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_HDMI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_DP;
- } else {
+ break;
+ default:
DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
return -EINVAL;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
- dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
-
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
@@ -2321,7 +2146,7 @@
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
- } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
@@ -2356,32 +2181,27 @@
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
- int drm_enc_mode = DRM_MODE_ENCODER_NONE;
int ret = 0;
dpu_enc = to_dpu_encoder_virt(enc);
mutex_init(&dpu_enc->enc_lock);
- ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
- &drm_enc_mode);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
- dpu_enc->cur_master = NULL;
- spin_lock_init(&dpu_enc->enc_spinlock);
-
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
mutex_init(&dpu_enc->rc_lock);
- kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2423,6 +2243,9 @@
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ spin_lock_init(&dpu_enc->enc_spinlock);
+ dpu_enc->enabled = false;
+
return &dpu_enc->base;
}
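The vblank plumbing in this file now works through an explicit crtc assignment instead of a registered callback. A CRTC-side caller would wire it up roughly as below; this is a hypothetical sketch, the real call sites live in dpu_crtc.c, which is not part of this hunk:

/* Hypothetical sketch of the new encoder/crtc vblank wiring. */
static void example_crtc_vblank_on(struct drm_crtc *crtc,
				   struct drm_encoder *enc)
{
	/* tell the encoder which crtc it drives ... */
	dpu_encoder_assign_crtc(enc, crtc);
	/* ... then enable its vblank interrupts for that crtc */
	dpu_encoder_toggle_vblank_for_crtc(enc, crtc, true);
}

static void example_crtc_vblank_off(struct drm_crtc *crtc,
				    struct drm_encoder *enc)
{
	dpu_encoder_toggle_vblank_for_crtc(enc, crtc, false);
	/* clear the assignment so the WARN_ON() in
	 * dpu_encoder_assign_crtc() can catch a double assignment later
	 */
	dpu_encoder_assign_crtc(enc, NULL);
}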
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 60f809f..b491346 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DPU_ENCODER_H__
@@ -32,45 +21,36 @@
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
- * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
- * @display_num_of_h_tiles: Number of horizontal tiles in case of split
- * interface
- * @topology: Topology of the display
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
- bool needs_cdm;
- u32 display_num_of_h_tiles;
-};
-
-/**
- * dpu_encoder_kickoff_params - info encoder requires at kickoff
- * @affected_displays: bitmask, bit set means the ROI of the commit lies within
- * the bounds of the physical display at the bit index
- */
-struct dpu_encoder_kickoff_params {
- unsigned long affected_displays;
};
/**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
- * @conn_state: report hw reqs based on this proposed connector state
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
/**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- * will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
- * @cb: callback pointer, provide NULL to deregister and disable IRQs
- * @data: user data provided to callback
+ * @crtc: crtc pointer
*/
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
- void (*cb)(void *), void *data);
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
+ */
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -88,10 +68,8 @@
* Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
- * @params: kickoff time parameters
*/
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params);
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -108,6 +86,11 @@
void dpu_encoder_kickoff(struct drm_encoder *encoder);
/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ */
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
+
+/**
* dpu_encoder_wait_for_event - Waits for encoder events
* @encoder: encoder pointer
* @event: event to wait for
@@ -134,10 +117,10 @@
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
* @encoder: encoder pointer
*/
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
/**
* dpu_encoder_init - initialize virtual encoder object
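With the kickoff params argument removed and the frame-done watchdog now scaled by the mode's refresh rate, the commit path against this header reduces to a plain prepare/kickoff pair. A hypothetical sketch; at 60 Hz the watchdog armed inside dpu_encoder_kickoff() works out to 5 * 1000 / 60, roughly 83 ms:

/* Hypothetical commit-path sketch against the simplified kickoff API. */
static void example_commit_encoder(struct drm_encoder *enc)
{
	/* no struct dpu_encoder_kickoff_params argument anymore */
	dpu_encoder_prepare_for_kickoff(enc);

	/* arms the frame-done timer for
	 * DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / vrefresh ms,
	 * then flushes and starts all physical encoders
	 */
	dpu_encoder_kickoff(enc);
}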
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index c7df8aa..f8f2515 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __DPU_ENCODER_PHYS_H__
@@ -22,8 +13,8 @@
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
-#include "dpu_hw_cdm.h"
#include "dpu_encoder.h"
+#include "dpu_crtc.h"
#define DPU_ENCODER_NAME_MAX 16
@@ -114,10 +105,6 @@
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @setup_misr: Sets up MISR, enable and disables based on sysfs
- * @collect_misr: Collects MISR data on frame update
- * @hw_reset: Issue HW recovery such as CTL reset and clear
- * DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
@@ -143,22 +130,15 @@
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
- void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
-
- void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
- bool enable, u32 frame_count);
- u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
- void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
@@ -210,16 +190,14 @@
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
- * @hw_cdm: Hardware interface to the cdm registers
- * @cdm_cfg: Chroma-down hardware configuration
* @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
- * @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -241,15 +219,13 @@
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
- struct dpu_hw_cdm *hw_cdm;
- struct dpu_hw_cdm_cfg cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
- enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -268,19 +244,6 @@
}
/**
- * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
- * mode specific operations
- * @base: Baseclass physical encoder structure
- * @hw_intf: Hardware interface to the intf registers
- * @timing_params: Current timing parameter
- */
-struct dpu_encoder_phys_vid {
- struct dpu_encoder_phys base;
- struct dpu_hw_intf *hw_intf;
- struct intf_timing_params timing_params;
-};
-
-/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
@@ -355,23 +318,18 @@
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- * This helper function may be optionally specified by physical
- * encoders if they require ctl hw reset. If state is currently
- * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
+ struct dpu_crtc_state *dpu_cstate;
+
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
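dpu_encoder_helper_get_3d_blend_mode() now keys the 3D merge decision off the crtc state's mixer count rather than the removed topology name. The behaviour is equivalent to this standalone sketch (CRTC_DUAL_MIXERS comes from dpu_crtc.h, which this header now includes):

/* Illustrative restatement of the inline helper's decision above. */
static enum dpu_3d_blend_mode example_3d_blend_mode(enum dpu_enc_split_role role,
						    u32 num_mixers)
{
	/* a solo encoder driving a dual-mixer crtc does horizontal row merge */
	if (role == ENC_ROLE_SOLO && num_mixers == CRTC_DUAL_MIXERS)
		return BLEND_3D_H_ROW_INT;

	return BLEND_3D_NONE;
}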
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 3084675..2923b63 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -44,14 +35,7 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
- struct dpu_encoder_phys_cmd *cmd_enc)
-{
- return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
- struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
}
@@ -196,9 +180,6 @@
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
if (!phys_enc || !mode || !adj_mode) {
DPU_ERROR("invalid args\n");
@@ -208,22 +189,6 @@
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
-
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
- }
-
_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}
@@ -262,7 +227,6 @@
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -431,7 +395,8 @@
return;
}
- tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
/* enable external TE after kickoff to avoid premature autorefresh */
tc_cfg.hw_vsync_mode = 0;
@@ -451,7 +416,7 @@
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
- mode->vtotal, mode->vrefresh);
+ mode->vtotal, drm_mode_vrefresh(mode));
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
@@ -515,14 +480,11 @@
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- goto skip_flush;
+ return;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
- return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -618,29 +580,13 @@
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
- DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
- return;
- }
-
- DPU_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
@@ -738,7 +684,7 @@
/* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
return rc;
}
@@ -761,7 +707,7 @@
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
@@ -774,9 +720,6 @@
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
- if (!phys_enc)
- return;
-
/**
* re-enable external TE, either for the first time after enabling
* or if disabled for Autorefresh
@@ -810,7 +753,6 @@
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -823,7 +765,6 @@
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -833,17 +774,10 @@
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
- goto fail;
+ return ERR_PTR(ret);
}
phys_enc = &cmd_enc->base;
-
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail_mdp_init;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -898,8 +832,5 @@
return phys_enc;
-fail_mdp_init:
- kfree(cmd_enc);
-fail:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 14fc7c2..b9c84fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -18,14 +10,14 @@
#include "dpu_trace.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
@@ -44,7 +36,7 @@
}
static void drm_mode_to_intf_timing_params(
- const struct dpu_encoder_phys_vid *vid_enc,
+ const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
@@ -92,7 +84,7 @@
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
@@ -110,7 +102,7 @@
*/
}
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@@ -119,7 +111,7 @@
return active + inactive;
}
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@@ -143,11 +135,11 @@
* lines based on the chip worst case latencies.
*/
static u32 programmable_fetch_get_num_lines(
- struct dpu_encoder_phys_vid *vid_enc,
+ struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
- vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
@@ -155,26 +147,26 @@
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
- DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
@@ -194,8 +186,6 @@
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
- struct dpu_encoder_phys_vid *vid_enc =
- to_dpu_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
@@ -203,10 +193,10 @@
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
- if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
- vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
@@ -216,12 +206,12 @@
f.fetch_start = vfp_fetch_start_vsync_counter;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
@@ -231,7 +221,7 @@
struct drm_display_mode *adj_mode)
{
if (phys_enc)
- DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -242,7 +232,6 @@
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct drm_display_mode mode;
struct intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
@@ -256,13 +245,12 @@
}
mode = phys_enc->cached_mode;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
@@ -271,32 +259,30 @@
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
- drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
- DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
- intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
-
- vid_enc->timing_params = timing_params;
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -331,13 +317,17 @@
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
+
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
DPU_ATRACE_END("vblank_irq");
}
@@ -353,21 +343,10 @@
phys_enc);
}
-static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
-{
- if (!phys_enc)
- return false;
-
- if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
- return true;
-
- return false;
-}
-
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+ return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
@@ -395,38 +374,15 @@
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_rm *rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc || !phys_enc->dpu_kms) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
- rm = &phys_enc->dpu_kms->rm;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
- DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
- }
-
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
+ DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
@@ -437,7 +393,6 @@
bool enable)
{
int ret = 0;
- struct dpu_encoder_phys_vid *vid_enc;
int refcount;
if (!phys_enc) {
@@ -446,7 +401,6 @@
}
refcount = atomic_read(&phys_enc->vblank_refcount);
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
@@ -471,7 +425,7 @@
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
@@ -479,34 +433,17 @@
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_hw_intf *intf;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
- DPU_ERROR("invalid encoder/device\n");
- return;
- }
- priv = phys_enc->parent->dev->dev_private;
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- intf = vid_enc->hw_intf;
ctl = phys_enc->hw_ctl;
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
- return;
- }
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
- dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
@@ -519,12 +456,13 @@
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
- DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -533,43 +471,24 @@
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- kfree(vid_enc);
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc || !hw_res) {
- DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
- phys_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf) {
- DPU_ERROR("invalid arg(s), hw_intf\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
-static int _dpu_encoder_phys_vid_wait_for_vblank(
- struct dpu_encoder_phys *phys_enc, bool notify)
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
int ret;
@@ -584,10 +503,6 @@
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
- if (notify && phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc,
- DPU_ENCODER_FRAME_EVENT_DONE);
return 0;
}
@@ -597,33 +512,41 @@
if (ret == -ETIMEDOUT) {
dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
- } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc,
- DPU_ENCODER_FRAME_EVENT_DONE);
+ }
return ret;
}
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+ int ret;
+
+ if (!hw_ctl)
+ return 0;
+
+ ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
+ (hw_ctl->ops.get_flush_register(hw_ctl) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0) {
+ DPU_ERROR("vblank timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc || !params) {
+ if (!phys_enc) {
DPU_ERROR("invalid encoder/parameters\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
@@ -635,17 +558,15 @@
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
- DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- dpu_dbg_dump(false, __func__, true, true);
}
}
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
unsigned long lock_flags;
int ret;
@@ -656,16 +577,13 @@
}
priv = phys_enc->parent->dev->dev_private;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "\n");
-
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
@@ -674,7 +592,7 @@
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -688,12 +606,12 @@
* scanout buffer) don't latch properly..
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
- ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret);
+ phys_enc->hw_intf->idx - INTF_0, ret);
}
}
@@ -704,15 +622,6 @@
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
/*
* Video mode must flush CTL before enabling timing engine
@@ -720,9 +629,9 @@
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0);
+ phys_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -731,16 +640,13 @@
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_vid *vid_enc;
int ret;
if (!phys_enc)
return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0,
+ phys_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
@@ -756,48 +662,19 @@
}
}
-static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
- bool enable, u32 frame_count)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
- vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
- enable, frame_count);
-}
-
-static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return 0;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
- vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
-}
-
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
return -EINVAL;
- return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
@@ -810,16 +687,13 @@
ops->destroy = dpu_encoder_phys_vid_destroy;
ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
- ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
- ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
@@ -827,9 +701,6 @@
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_phys_vid *vid_enc = NULL;
- struct dpu_rm_hw_iter iter;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -838,44 +709,16 @@
goto fail;
}
- vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
- if (!vid_enc) {
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
ret = -ENOMEM;
goto fail;
}
- phys_enc = &vid_enc->base;
-
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- /**
- * hw_intf resource permanently assigned to this encoder
- * Other resources allocated at atomic commit time by use case
- */
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == p->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- ret = -EINVAL;
- DPU_ERROR("failed to get hw_intf\n");
- goto fail;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
@@ -909,13 +752,13 @@
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
- DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
fail:
DPU_ERROR("failed to create encoder\n");
- if (vid_enc)
+ if (phys_enc)
dpu_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index bfcd165..24ab624 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -216,7 +208,7 @@
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
+ false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
@@ -263,13 +255,13 @@
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
@@ -479,90 +471,6 @@
};
/*
- * A5x tile formats tables:
- * These tables hold the A5x tile formats supported.
- */
-static const struct dpu_format dpu_format_map_tile[] = {
- INTERLEAVED_RGB_FMT_TILED(BGR565,
- 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
- false, 2, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(ARGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- true, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(ABGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
- true, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(XBGR8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(RGBA8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(BGRA8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- true, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(BGRX8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
- false, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(XRGB8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
- false, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(RGBX8888,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- false, 4, 0,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
- COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, DPU_FORMAT_FLAG_DX,
- DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
-
- PSEUDO_YUV_FMT_TILED(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
-
- PSEUDO_YUV_FMT_TILED(NV21,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C1_B_Cb,
- DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
- DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
-};
-
-/*
* UBWC formats table:
* This table holds the UBWC formats supported.
* If a compression ratio needs to be used for this or any other format,
@@ -607,32 +515,6 @@
DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
};
-static const struct dpu_format dpu_format_map_p010[] = {
- PSEUDO_YUV_FMT_LOOSE(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
- DPU_FETCH_LINEAR, 2),
-};
-
-static const struct dpu_format dpu_format_map_p010_ubwc[] = {
- PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
- DPU_FORMAT_FLAG_COMPRESSED),
- DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
-};
-
-static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
- PSEUDO_YUV_FMT_TILED(NV12,
- 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C2_R_Cr,
- DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
- DPU_FORMAT_FLAG_COMPRESSED),
- DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
-};
-
/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
* Note: Not using the drm_format_*_subsampling since we have formats
*/
@@ -921,7 +803,7 @@
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- goto done;
+ return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -952,12 +834,11 @@
layout->plane_addr[1] = 0;
if (!meta)
- goto done;
+ return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
-done:
return 0;
}
@@ -1041,10 +922,11 @@
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos)
{
- int ret, i, num_base_fmt_planes;
+ const struct drm_format_info *info;
const struct dpu_format *fmt;
struct dpu_hw_fmt_layout layout;
uint32_t bos_total_size = 0;
+ int ret, i;
if (!msm_fmt || !cmd || !bos) {
DRM_ERROR("invalid arguments\n");
@@ -1052,14 +934,16 @@
}
fmt = to_dpu_format(msm_fmt);
- num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+ info = drm_format_info(fmt->base.pixel_format);
+ if (!info)
+ return -EINVAL;
ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
&layout, cmd->pitches);
if (ret)
return ret;
- for (i = 0; i < num_base_fmt_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
if (!bos[i]) {
DRM_ERROR("invalid handle for plane %d\n", i);
return -EINVAL;
@@ -1138,36 +1022,3 @@
return &fmt->base;
return NULL;
}
-
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max)
-{
- uint32_t i, fourcc_format;
-
- if (!format_list || !pixel_formats)
- return 0;
-
- for (i = 0, fourcc_format = 0;
- format_list->fourcc_format && i < pixel_formats_max;
- ++format_list) {
- /* verify if listed format is in dpu_format_map? */
-
- /* optionally return modified formats */
- if (pixel_modifiers) {
- /* assume same modifier for all fb planes */
- pixel_formats[i] = format_list->fourcc_format;
- pixel_modifiers[i++] = format_list->modifier;
- } else {
- /* assume base formats grouped together */
- if (fourcc_format != format_list->fourcc_format) {
- fourcc_format = format_list->fourcc_format;
- pixel_formats[i++] = fourcc_format;
- }
- }
- }
-
- return i;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index a54451d..418f5ae 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_FORMATS_H
@@ -41,20 +33,6 @@
const uint64_t modifiers);
/**
- * dpu_populate_formats - populate the given array with fourcc codes supported
- * @format_list: pointer to list of possible formats
- * @pixel_formats: array to populate with fourcc codes
- * @pixel_modifiers: array to populate with drm modifiers, can be NULL
- * @pixel_formats_max: length of pixel formats array
- * Return: number of elements populated
- */
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max);
-
-/**
* dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format
* @kms: kms driver
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
index 58d29e4..ca26666 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -30,16 +22,10 @@
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
*/
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
@@ -51,8 +37,6 @@
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 0f4ca8a..2bf737f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_BLK_H
@@ -44,7 +36,7 @@
struct dpu_hw_blk_ops ops;
};
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 44ee063..04c8c44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -29,6 +21,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
@@ -71,7 +66,6 @@
.base = 0x0, .len = 0x45C,
.features = 0,
.highest_bank_bit = 0x2,
- .has_dest_scaler = true,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
@@ -149,7 +143,9 @@
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
#define _DMA_SBLK(num, sdma_pri) \
@@ -161,7 +157,9 @@
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
@@ -174,45 +172,35 @@
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
-#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+#define SSPP_BLK(_name, _id, _base, _features, \
+ _sblk, _xinid, _type, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
- .features = VIG_SDM845_MASK, \
+ .features = _features, \
.sblk = &_sblk, \
.xin_id = _xinid, \
- .type = SSPP_TYPE_VIG, \
- .clk_ctrl = _clkctrl \
- }
-
-#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = 0x1c8, \
- .features = DMA_SDM845_MASK, \
- .sblk = &_sblk, \
- .xin_id = _xinid, \
- .type = SSPP_TYPE_DMA, \
+ .type = _type, \
.clk_ctrl = _clkctrl \
}
static struct dpu_sspp_cfg sdm845_sspp[] = {
- SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
- sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
- SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
- sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
- SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
- sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
- SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
- sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
- SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
- sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
- SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
- sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
- SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
- sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
- SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
- sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
/*************************************************************
@@ -227,48 +215,23 @@
},
};
-#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = MIXER_SDM845_MASK, \
.sblk = &sdm845_lm_sblk, \
- .ds = _ds, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
-};
-
-/*************************************************************
- * DS sub blocks config
- *************************************************************/
-static const struct dpu_ds_top_cfg sdm845_ds_top = {
- .name = "ds_top_0", .id = DS_TOP,
- .base = 0x60000, .len = 0xc,
- .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
- .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .maxupscale = MAX_UPSCALE_RATIO,
-};
-
-#define DS_BLK(_name, _id, _base) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x800, \
- .features = DPU_SSPP_SCALER_QSEED3, \
- .top = &sdm845_ds_top \
- }
-
-static struct dpu_ds_cfg sdm845_ds[] = {
- DS_BLK("ds_0", DS_0, 0x800),
- DS_BLK("ds_1", DS_1, 0x1000),
+ LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
};
/*************************************************************
@@ -328,18 +291,6 @@
};
/*************************************************************
- * CDM sub blocks config
- *************************************************************/
-static struct dpu_cdm_cfg sdm845_cdm[] = {
- {
- .name = "cdm_0", .id = CDM_0,
- .base = 0x79200, .len = 0x224,
- .features = 0,
- .intf_connect = BIT(INTF_3),
- },
-};
-
-/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -461,12 +412,8 @@
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
- .ds_count = ARRAY_SIZE(sdm845_ds),
- .ds = sdm845_ds,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
- .cdm_count = ARRAY_SIZE(sdm845_cdm),
- .cdm = sdm845_cdm,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index f0cb0d4..ec76b86 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_CATALOG_H
@@ -17,7 +9,6 @@
#include <linux/bug.h>
#include <linux/bitmap.h>
#include <linux/err.h>
-#include <drm/drmP.h>
/**
* Max hardware block count: For ex: max 12 SSPP pipes or
@@ -252,17 +243,6 @@
};
/**
- * struct dpu_format_extended - define dpu specific pixel format+modifier
- * @fourcc_format: Base FOURCC pixel format code
- * @modifier: 64-bit drm format modifier, same modifier must be applied to all
- * framebuffer planes
- */
-struct dpu_format_extended {
- uint32_t fourcc_format;
- uint64_t modifier;
-};
-
-/**
* enum dpu_qos_lut_usage - define QoS LUT use cases
*/
enum dpu_qos_lut_usage {
@@ -348,7 +328,9 @@
* @pcc_blk:
* @igc_blk:
* @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
*/
struct dpu_sspp_sub_blks {
const struct dpu_sspp_blks_common *common;
@@ -366,8 +348,10 @@
struct dpu_pp_blk pcc_blk;
struct dpu_pp_blk igc_blk;
- const struct dpu_format_extended *format_list;
- const struct dpu_format_extended *virt_format_list;
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
};
/**
@@ -428,7 +412,6 @@
* @highest_bank_bit: UBWC parameter
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
- * @has_dest_scaler: indicates support of destination scaler
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -436,7 +419,6 @@
u32 highest_bank_bit;
u32 ubwc_static;
u32 ubwc_swizzle;
- bool has_dest_scaler;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
@@ -474,50 +456,16 @@
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
- * @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
- u32 ds;
unsigned long lm_pair_mask;
};
/**
- * struct dpu_ds_top_cfg - information of dest scaler top
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying features
- * @version hw version of dest scaler
- * @maxinputwidth maximum input line width
- * @maxoutputwidth maximum output line width
- * @maxupscale maximum upscale ratio
- */
-struct dpu_ds_top_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- u32 maxinputwidth;
- u32 maxoutputwidth;
- u32 maxupscale;
-};
-
-/**
- * struct dpu_ds_cfg - information of dest scaler blocks
- * @id enum identifying this block
- * @base register offset wrt DS top offset
- * @features bit mask identifying features
- * @version hw version of the qseed block
- * @top DS top information
- */
-struct dpu_ds_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- const struct dpu_ds_top_cfg *top;
-};
-
-/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -530,18 +478,6 @@
};
/**
- * struct dpu_cdm_cfg - information of chroma down blocks
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * @intf_connect Bitmask of INTF IDs this CDM can connect to
- */
-struct dpu_cdm_cfg {
- DPU_HW_BLK_INFO;
- unsigned long intf_connect;
-};
-
-/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
@@ -728,15 +664,9 @@
u32 mixer_count;
struct dpu_lm_cfg *mixer;
- u32 ds_count;
- struct dpu_ds_cfg *ds;
-
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
- u32 cdm_count;
- struct dpu_cdm_cfg *cdm;
-
u32 intf_count;
struct dpu_intf_cfg *intf;
@@ -771,9 +701,7 @@
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
-#define BLK_DS(s) ((s)->ds)
#define BLK_PINGPONG(s) ((s)->pingpong)
-#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
@@ -792,13 +720,4 @@
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg: pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
- return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
- test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index 3c9f028..bb6112c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -1,168 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_hw_mdss.h"
-static const struct dpu_format_extended plane_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {0, 0},
+static const uint32_t qcom_compressed_supported_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR565,
};
-static const struct dpu_format_extended plane_formats_yuv[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
-
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV21, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_NV61, 0},
- {DRM_FORMAT_VYUY, 0},
- {DRM_FORMAT_UYVY, 0},
- {DRM_FORMAT_YUYV, 0},
- {DRM_FORMAT_YVYU, 0},
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_YVU420, 0},
- {0, 0},
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
};
-static const struct dpu_format_extended cursor_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {0, 0},
-};
+static const uint32_t plane_formats_yuv[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
-static const struct dpu_format_extended wb2_formats[] = {
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
-
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
-
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_YUYV, 0},
-
- {0, 0},
-};
-
-static const struct dpu_format_extended rgb_10bit_formats[] = {
- {DRM_FORMAT_BGRA1010102, 0},
- {DRM_FORMAT_BGRX1010102, 0},
- {DRM_FORMAT_RGBA1010102, 0},
- {DRM_FORMAT_RGBX1010102, 0},
- {DRM_FORMAT_ABGR2101010, 0},
- {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XBGR2101010, 0},
- {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB2101010, 0},
- {DRM_FORMAT_XRGB2101010, 0},
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
deleted file mode 100644
index 554874b..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hwio.h"
-#include "dpu_hw_catalog.h"
-#include "dpu_hw_cdm.h"
-#include "dpu_dbg.h"
-#include "dpu_kms.h"
-
-#define CDM_CSC_10_OPMODE 0x000
-#define CDM_CSC_10_BASE 0x004
-
-#define CDM_CDWN2_OP_MODE 0x100
-#define CDM_CDWN2_CLAMP_OUT 0x104
-#define CDM_CDWN2_PARAMS_3D_0 0x108
-#define CDM_CDWN2_PARAMS_3D_1 0x10C
-#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
-#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
-#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
-#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
-#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
-#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
-#define CDM_CDWN2_COEFF_COSITE_V 0x128
-#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
-#define CDM_CDWN2_OUT_SIZE 0x130
-
-#define CDM_HDMI_PACK_OP_MODE 0x200
-#define CDM_CSC_10_MATRIX_COEFF_0 0x004
-
-/**
- * Horizontal coefficients for cosite chroma downscale
- * s13 representation of coefficients
- */
-static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
-
-/**
- * Horizontal coefficients for offsite chroma downscale
- */
-static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
-
-/**
- * Vertical coefficients for cosite chroma downscale
- */
-static u32 cosite_v_coeff[] = {0x00080004};
-/**
- * Vertical coefficients for offsite chroma downscale
- */
-static u32 offsite_v_coeff[] = {0x00060002};
-
-/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
-static struct dpu_csc_cfg rgb2yuv_cfg = {
- {
- 0x0083, 0x0102, 0x0032,
- 0x1fb5, 0x1f6c, 0x00e1,
- 0x00e1, 0x1f45, 0x1fdc
- },
- { 0x00, 0x00, 0x00 },
- { 0x0040, 0x0200, 0x0200 },
- { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
- { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
-static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
- struct dpu_mdss_cfg *m,
- void __iomem *addr,
- struct dpu_hw_blk_reg_map *b)
-{
- int i;
-
- for (i = 0; i < m->cdm_count; i++) {
- if (cdm == m->cdm[i].id) {
- b->base_off = addr;
- b->blk_off = m->cdm[i].base;
- b->length = m->cdm[i].len;
- b->hwversion = m->hwversion;
- b->log_mask = DPU_DBG_MASK_CDM;
- return &m->cdm[i];
- }
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
- struct dpu_csc_cfg *data)
-{
- dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
-
- return 0;
-}
-
-static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 opmode = 0;
- u32 out_size = 0;
-
- if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
- opmode &= ~BIT(7);
- else
- opmode |= BIT(7);
-
- /* ENABLE DWNS_H bit */
- opmode |= BIT(1);
-
- switch (cfg->h_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_H field */
- opmode &= ~(0x18);
- /* CLEAR DWNS_H bit */
- opmode &= ~BIT(1);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_H field (pixel drop is 0) */
- opmode &= ~(0x18);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_H field (Average is 0x1) */
- opmode &= ~(0x18);
- opmode |= (0x1 << 0x3);
- break;
- case CDM_CDWN_COSITE:
- /* Clear METHOD_H field (Average is 0x2) */
- opmode &= ~(0x18);
- opmode |= (0x2 << 0x3);
- /* Co-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
- cosite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
- cosite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
- cosite_h_coeff[2]);
- break;
- case CDM_CDWN_OFFSITE:
- /* Clear METHOD_H field (Average is 0x3) */
- opmode &= ~(0x18);
- opmode |= (0x3 << 0x3);
-
- /* Off-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
- offsite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
- offsite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
- offsite_h_coeff[2]);
- break;
- default:
- pr_err("%s invalid horz down sampling type\n", __func__);
- return -EINVAL;
- }
-
- /* ENABLE DWNS_V bit */
- opmode |= BIT(2);
-
- switch (cfg->v_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_V field */
- opmode &= ~(0x60);
- /* CLEAR DWNS_V bit */
- opmode &= ~BIT(2);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_V field (pixel drop is 0) */
- opmode &= ~(0x60);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_V field (Average is 0x1) */
- opmode &= ~(0x60);
- opmode |= (0x1 << 0x5);
- break;
- case CDM_CDWN_COSITE:
- /* Clear METHOD_V field (Average is 0x2) */
- opmode &= ~(0x60);
- opmode |= (0x2 << 0x5);
- /* Co-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_COSITE_V,
- cosite_v_coeff[0]);
- break;
- case CDM_CDWN_OFFSITE:
- /* Clear METHOD_V field (Average is 0x3) */
- opmode &= ~(0x60);
- opmode |= (0x3 << 0x5);
-
- /* Off-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_OFFSITE_V,
- offsite_v_coeff[0]);
- break;
- default:
- return -EINVAL;
- }
-
- if (cfg->v_cdwn_type || cfg->h_cdwn_type)
- opmode |= BIT(0); /* EN CDWN module */
- else
- opmode &= ~BIT(0);
-
- out_size = (cfg->output_width & 0xFFFF) |
- ((cfg->output_height & 0xFFFF) << 16);
- DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
- DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
- DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
- ((0x3FF << 16) | 0x0));
-
- return 0;
-}
-
-static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cdm)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct dpu_format *fmt = cdm->output_fmt;
- struct cdm_output_cfg cdm_cfg = { 0 };
- u32 opmode = 0;
- u32 csc = 0;
-
- if (!DPU_FORMAT_IS_YUV(fmt))
- return -EINVAL;
-
- if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != DPU_CHROMA_H1V2)
- return -EINVAL; /*unsupported format */
- opmode = BIT(0);
- opmode |= (fmt->chroma_sample << 1);
- cdm_cfg.intf_en = true;
- }
-
- csc |= BIT(2);
- csc &= ~BIT(1);
- csc |= BIT(0);
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-
- DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
- DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
- return 0;
-}
-
-static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
-{
- struct cdm_output_cfg cdm_cfg = { 0 };
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-}
-
-static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
- unsigned long features)
-{
- ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
- ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
- ops->enable = dpu_hw_cdm_enable;
- ops->disable = dpu_hw_cdm_disable;
-}
-
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
-
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp)
-{
- struct dpu_hw_cdm *c;
- struct dpu_cdm_cfg *cfg;
- int rc;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- cfg = _cdm_offset(idx, m, addr, &c->hw);
- if (IS_ERR_OR_NULL(cfg)) {
- kfree(c);
- return ERR_PTR(-EINVAL);
- }
-
- c->idx = idx;
- c->caps = cfg;
- _setup_cdm_ops(&c->ops, c->caps->features);
- c->hw_mdp = hw_mdp;
-
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- /*
- * Perform any default initialization for the chroma down module
- * @setup default csc coefficients
- */
- dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
-
- return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
-}
-
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
-{
- if (cdm)
- dpu_hw_blk_destroy(&cdm->base);
- kfree(cdm);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
deleted file mode 100644
index 5cceb1e..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DPU_HW_CDM_H
-#define _DPU_HW_CDM_H
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hw_top.h"
-#include "dpu_hw_blk.h"
-
-struct dpu_hw_cdm;
-
-struct dpu_hw_cdm_cfg {
- u32 output_width;
- u32 output_height;
- u32 output_bit_depth;
- u32 h_cdwn_type;
- u32 v_cdwn_type;
- const struct dpu_format *output_fmt;
- u32 output_type;
- int flags;
-};
-
-enum dpu_hw_cdwn_type {
- CDM_CDWN_DISABLE,
- CDM_CDWN_PIXEL_DROP,
- CDM_CDWN_AVG,
- CDM_CDWN_COSITE,
- CDM_CDWN_OFFSITE,
-};
-
-enum dpu_hw_cdwn_output_type {
- CDM_CDWN_OUTPUT_HDMI,
- CDM_CDWN_OUTPUT_WB,
-};
-
-enum dpu_hw_cdwn_output_bit_depth {
- CDM_CDWN_OUTPUT_8BIT,
- CDM_CDWN_OUTPUT_10BIT,
-};
-
-/**
- * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
- * Assumption is these functions will be called after
- * clocks are enabled
- * @setup_csc: Programs the csc matrix
- * @setup_cdwn: Sets up the chroma down sub module
- * @enable: Enables the output to interface and programs the
- * output packer
- * @disable: Puts the cdm in bypass mode
- */
-struct dpu_hw_cdm_ops {
- /**
- * Programs the CSC matrix for conversion from RGB space to YUV space,
- * it is optional to call this function as this matrix is automatically
- * set during initialization, user should call this if it wants
- * to program a different matrix than default matrix.
- * @cdm: Pointer to the chroma down context structure
- * @data Pointer to CSC configuration data
- * return: 0 if success; error code otherwise
- */
- int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
- struct dpu_csc_cfg *data);
-
- /**
- * Programs the Chroma downsample part.
- * @cdm Pointer to chroma down context
- */
- int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Enable the CDM module
- * @cdm Pointer to chroma down context
- */
- int (*enable)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Disable the CDM module
- * @cdm Pointer to chroma down context
- */
- void (*disable)(struct dpu_hw_cdm *cdm);
-};
-
-struct dpu_hw_cdm {
- struct dpu_hw_blk base;
- struct dpu_hw_blk_reg_map hw;
-
- /* chroma down */
- const struct dpu_cdm_cfg *caps;
- enum dpu_cdm idx;
-
- /* mdp top hw driver */
- struct dpu_hw_mdp *hw_mdp;
-
- /* ops */
- struct dpu_hw_cdm_ops ops;
-};
-
-/**
- * dpu_hw_cdm - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_cdm, base);
-}
-
-/**
- * dpu_hw_cdm_init - initializes the cdm hw driver object.
- * should be called once before accessing every cdm.
- * @idx: cdm index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m : pointer to mdss catalog data
- * @hw_mdp: pointer to mdp top hw driver object
- */
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp);
-
-/**
- * dpu_hw_cdm_destroy - destroys CDM driver context
- * @cdm: pointer to CDM driver context
- */
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
-
-#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 06be7cf..179e8d5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,20 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
+#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,41 +64,6 @@
return stages;
}
-static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
-{
- DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
-}
-
-static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
-{
- DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
-}
-
-static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
-{
- ctx->pending_flush_mask = 0x0;
-}
-
-static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
- u32 flushbits)
-{
- ctx->pending_flush_mask |= flushbits;
-}
-
-static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
-{
- if (!ctx)
- return 0x0;
-
- return ctx->pending_flush_mask;
-}
-
-static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
-{
-
- DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
-}
-
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -114,7 +71,48 @@
return DPU_REG_READ(c, CTL_FLUSH);
}
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
@@ -169,7 +167,7 @@
return flushbits;
}
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
@@ -202,7 +200,7 @@
return flushbits;
}
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
@@ -224,19 +222,6 @@
return 0;
}
-static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
- u32 *flushbits, enum dpu_cdm cdm)
-{
- switch (cdm) {
- case CDM_0:
- *flushbits |= BIT(26);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -310,7 +295,7 @@
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
- u8 stages;
+ int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
@@ -485,13 +470,9 @@
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
- ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
@@ -499,7 +480,6 @@
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -518,18 +498,9 @@
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index c66a71f..d3ae939 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_CTL_H
@@ -142,10 +134,6 @@
u32 *flushbits,
enum dpu_intf blk);
- int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
- u32 *flushbits,
- enum dpu_cdm blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c0b7f00..8bfa7d0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/bitops.h>
@@ -170,10 +162,6 @@
/**
* AD4 interrupt status bit definitions
*/
-#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
-#define DPU_INTR_DARKENH_UPDATED BIT(3)
-#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
-#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
/**
* struct dpu_intr_reg - array of DPU register sets
@@ -782,18 +770,6 @@
return -EINVAL;
}
-static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
- uint32_t mask)
-{
- if (!intr)
- return;
-
- DPU_REG_WRITE(&intr->hw, reg_off, mask);
-
- /* ensure register writes go through */
- wmb();
-}
-
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
void (*cbfunc)(void *, int),
void *arg)
@@ -1004,18 +980,6 @@
return 0;
}
-static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
- uint32_t *mask)
-{
- if (!intr || !mask)
- return -EINVAL;
-
- *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
- | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
-
- return 0;
-}
-
static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
{
int i;
@@ -1065,19 +1029,6 @@
wmb();
}
-static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
- int irq_idx)
-{
- unsigned long irq_flags;
-
- if (!intr)
- return;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear)
{
@@ -1113,16 +1064,13 @@
static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
- ops->set_mask = dpu_hw_intr_set_mask;
ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
ops->enable_irq = dpu_hw_intr_enable_irq;
ops->disable_irq = dpu_hw_intr_disable_irq;
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
- ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 61e4cba..4edcf40 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_INTERRUPTS_H
@@ -20,13 +12,6 @@
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
-#define IRQ_SOURCE_MDP BIT(0)
-#define IRQ_SOURCE_DSI0 BIT(4)
-#define IRQ_SOURCE_DSI1 BIT(5)
-#define IRQ_SOURCE_HDMI BIT(8)
-#define IRQ_SOURCE_EDP BIT(12)
-#define IRQ_SOURCE_MHL BIT(16)
-
/**
* dpu_intr_type - HW Interrupt Type
* @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
@@ -96,18 +81,6 @@
*/
struct dpu_hw_intr_ops {
/**
- * set_mask - Programs the given interrupt register with the
- * given interrupt mask. Register value will get overwritten.
- * @intr: HW interrupt handle
- * @reg_off: MDSS HW register offset
- * @irqmask: IRQ mask value
- */
- void (*set_mask)(
- struct dpu_hw_intr *intr,
- uint32_t reg,
- uint32_t irqmask);
-
- /**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops
* @intr_type: Interrupt type defined in dpu_intr_type
@@ -177,16 +150,6 @@
struct dpu_hw_intr *intr);
/**
- * clear_interrupt_status - Clears HW interrupt status based on given
- * lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- */
- void (*clear_interrupt_status)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
* clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index return from irq_idx_lookup
@@ -206,21 +169,6 @@
struct dpu_hw_intr *intr,
int irq_idx,
bool clear);
-
- /**
- * get_valid_interrupts - Gets a mask of all valid interrupt sources
- * within DPU. These are actually status bits
- * within interrupt registers that specify the
- * source of the interrupt in IRQs. For example,
- * valid interrupt sources can be MDP, DSI,
- * HDMI etc.
- * @intr: HW interrupt handle
- * @mask: Returning the interrupt source MASK
- * @return: 0 for success, otherwise failure
- */
- int (*get_valid_interrupts)(
- struct dpu_hw_intr *intr,
- uint32_t *mask);
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index d280df5..dcd87cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
@@ -65,9 +56,6 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-#define INTF_MISR_CTRL 0x180
-#define INTF_MISR_SIGNATURE 0x184
-
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -246,30 +234,6 @@
}
}
-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
-
- return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
-}
-
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
@@ -289,15 +253,10 @@
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
- ops->setup_misr = dpu_hw_intf_setup_misr;
- ops->collect_misr = dpu_hw_intf_collect_misr;
ops->get_line_count = dpu_hw_intf_get_line_count;
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
@@ -305,7 +264,6 @@
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -326,18 +284,9 @@
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index a79d735..b03acc2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_INTF_H
@@ -59,8 +51,6 @@
* @ setup_prog_fetch : enables/disables the programmable fetch logic
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
- * @ setup_misr: enables/disables MISR in HW register
- * @ collect_misr: reads and stores MISR data from HW register
* @ get_line_count: reads current vertical line counter
*/
struct dpu_hw_intf_ops {
@@ -77,11 +67,6 @@
void (*get_status)(struct dpu_hw_intf *intf,
struct intf_status *status);
- void (*setup_misr)(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count);
-
- u32 (*collect_misr)(struct dpu_hw_intf *intf);
-
u32 (*get_line_count)(struct dpu_hw_intf *intf);
};
@@ -99,16 +84,6 @@
};
/**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_intf, base);
-}
-
-/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 4ab72b0..5bc39ba 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_kms.h"
@@ -15,8 +7,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
-#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
@@ -34,9 +24,6 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-#define LM_MISR_CTRL 0x310
-#define LM_MISR_SIGNATURE 0x314
-
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -67,16 +54,10 @@
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
- int rc;
+ if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
- if (stage == DPU_STAGE_BASE)
- rc = -EINVAL;
- else if (stage <= sblk->maxblendstages)
- rc = sblk->blendstage_base[stage - DPU_STAGE_0];
- else
- rc = -EINVAL;
-
- return rc;
+ return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -166,35 +147,6 @@
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
- void *cfg)
-{
-}
-
-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, LM_MISR_SIGNATURE);
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -206,15 +158,9 @@
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_gc = dpu_hw_lm_gc;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
@@ -222,7 +168,6 @@
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -239,18 +184,9 @@
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index e29e5da..147ace3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_LM_H
@@ -61,18 +53,6 @@
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
- /**
- * setup_gc : enable/disable gamma correction feature
- */
- void (*setup_gc)(struct dpu_hw_mixer *mixer,
- void *cfg);
-
- /* setup_misr: enables/disables MISR in HW register */
- void (*setup_misr)(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count);
-
- /* collect_misr: reads and stores MISR data from HW register */
- u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 35e6bf9..6868821 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_MDSS_H
@@ -100,7 +92,6 @@
DPU_HW_BLK_SSPP,
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
- DPU_HW_BLK_CDM,
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
@@ -173,13 +164,6 @@
DSPP_MAX
};
-enum dpu_ds {
- DS_TOP,
- DS_0,
- DS_1,
- DS_MAX
-};
-
enum dpu_ctl {
CTL_0 = 1,
CTL_1,
@@ -189,12 +173,6 @@
CTL_MAX
};
-enum dpu_cdm {
- CDM_0 = 1,
- CDM_1,
- CDM_MAX
-};
-
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@@ -246,12 +224,6 @@
WB_MAX
};
-enum dpu_ad {
- AD_0 = 0x1,
- AD_1,
- AD_MAX
-};
-
enum dpu_cwb {
CWB_0 = 0x1,
CWB_1,
@@ -278,12 +250,6 @@
VBIF_NRT = VBIF_1
};
-enum dpu_iommu_domain {
- DPU_IOMMU_DOMAIN_UNSECURE,
- DPU_IOMMU_DOMAIN_SECURE,
- DPU_IOMMU_DOMAIN_MAX
-};
-
/**
* DPU HW,Component order color map
*/
@@ -378,7 +344,6 @@
* @alpha_enable: whether the format has an alpha channel
* @num_planes: number of planes (including meta data planes)
* @fetch_mode: linear, tiled, or ubwc hw fetch behavior
- * @is_yuv: is format a yuv variant
* @flag: usage bit flags
* @tile_width: format tile width
* @tile_height: format tile height
@@ -451,15 +416,14 @@
* Define bit masks for h/w logging.
*/
#define DPU_DBG_MASK_NONE (1 << 0)
-#define DPU_DBG_MASK_CDM (1 << 1)
-#define DPU_DBG_MASK_INTF (1 << 2)
-#define DPU_DBG_MASK_LM (1 << 3)
-#define DPU_DBG_MASK_CTL (1 << 4)
-#define DPU_DBG_MASK_PINGPONG (1 << 5)
-#define DPU_DBG_MASK_SSPP (1 << 6)
-#define DPU_DBG_MASK_WB (1 << 7)
-#define DPU_DBG_MASK_TOP (1 << 8)
-#define DPU_DBG_MASK_VBIF (1 << 9)
-#define DPU_DBG_MASK_ROT (1 << 10)
+#define DPU_DBG_MASK_INTF (1 << 1)
+#define DPU_DBG_MASK_LM (1 << 2)
+#define DPU_DBG_MASK_CTL (1 << 3)
+#define DPU_DBG_MASK_PINGPONG (1 << 4)
+#define DPU_DBG_MASK_SSPP (1 << 5)
+#define DPU_DBG_MASK_WB (1 << 6)
+#define DPU_DBG_MASK_TOP (1 << 7)
+#define DPU_DBG_MASK_VBIF (1 << 8)
+#define DPU_DBG_MASK_ROT (1 << 9)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index cc3a623..5dbaba9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/iopoll.h>
@@ -16,7 +8,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
@@ -177,7 +168,7 @@
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
- goto line_count_exit;
+ return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
@@ -186,7 +177,6 @@
else
line -= init;
-line_count_exit:
return line;
}
@@ -201,10 +191,7 @@
ops->get_line_count = dpu_hw_pp_get_line_count;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
@@ -212,7 +199,6 @@
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -228,18 +214,9 @@
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3caccd7..58bdb92 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_PINGPONG_H
@@ -105,16 +97,6 @@
};
/**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
-/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index c25b52a..4f8b813 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -1,20 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -141,7 +132,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
@@ -662,7 +653,8 @@
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
- if (dpu_hw_sspp_multirect_enabled(c->cap))
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +689,7 @@
return ERR_PTR(-ENOMEM);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +697,6 @@
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
- int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
@@ -730,18 +718,9 @@
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
- rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
return hw_pipe;
-
-blk_init_error:
- kzfree(hw_pipe);
-
- return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4d81e5f..a3680b4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_SSPP_H
@@ -392,16 +384,6 @@
};
/**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pipe, base);
-}
-
-/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
* @idx: Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index db2798e..f9af52a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
@@ -98,23 +89,6 @@
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
-static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c;
- u32 out_ctl = 0;
-
- if (!mdp || !cfg)
- return;
-
- c = &mdp->hw;
-
- if (cfg->intf_en)
- out_ctl |= BIT(19);
-
- DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
-}
-
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
@@ -307,7 +281,6 @@
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
- ops->setup_cdm_output = dpu_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
@@ -340,10 +313,7 @@
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
@@ -351,7 +321,6 @@
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
- int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
@@ -373,20 +342,9 @@
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
- rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+ dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
return mdp;
-
-blk_init_error:
- kzfree(mdp);
-
- return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 899925a..1d9d32e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_TOP_H
@@ -52,14 +44,6 @@
};
/**
- * struct cdm_output_cfg: output configuration for cdm
- * @intf_en : enable/disable interface output
- */
-struct cdm_output_cfg {
- bool intf_en;
-};
-
-/**
* struct dpu_danger_safe_status: danger and safe status signals
* @mdp: top level status
* @sspp: source pipe status
@@ -89,7 +73,6 @@
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
* @setup_pp_split : Programs the pp split control registers
- * @setup_cdm_output : programs cdm control
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
@@ -102,14 +85,6 @@
struct split_pipe_cfg *p);
/**
- * setup_cdm_output() : Setup selection control of the cdm data path
- * @mdp : mdp top context driver
- * @cfg : cdm output configuration
- */
- void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg);
-
- /**
* setup_traffic_shaper() : Setup traffic shaper control
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
@@ -178,16 +153,6 @@
};
/**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_mdp, base);
-}
-
-/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 4cabae4..84e9875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -1,13 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -50,9 +42,6 @@
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
-#define QSEED3_MISR_CTRL 0x70
-#define QSEED3_MISR_SIGNATURE_0 0x74
-#define QSEED3_MISR_SIGNATURE_1 0x78
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 1240f50..234eb7d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_UTIL_H
@@ -18,7 +10,6 @@
#include "dpu_hw_mdss.h"
#define REG_MASK(n) ((BIT(n)) - 1)
-struct dpu_format_extended;
/*
* This is the common struct maintained by each sub block
@@ -148,16 +139,6 @@
struct dpu_hw_scaler3_de_cfg de;
};
-struct dpu_hw_scaler3_lut_cfg {
- bool is_configured;
- u32 *dir_lut;
- size_t dir_len;
- u32 *cir_lut;
- size_t cir_len;
- u32 *sep_lut;
- size_t sep_len;
-};
-
/**
* struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
@@ -325,12 +306,6 @@
#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
-#define MISR_FRAME_COUNT_MASK 0xFF
-#define MISR_CTRL_ENABLE BIT(8)
-#define MISR_CTRL_STATUS BIT(9)
-#define MISR_CTRL_STATUS_CLEAR BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
-
void *dpu_hw_util_get_dir(void);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index d439055..cf867f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index 471ff67..6417aa2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HW_VBIF_H
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index 5b2bc9b..c8156ed 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DPU_HWIO_H
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index b557687..27fbeb5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -1,20 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/clk/clk-conf.h>
#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <drm/drm_print.h>
#include "dpu_io_util.h"
@@ -112,9 +107,9 @@
rc = -EPERM;
}
- if (rc) {
- msm_dss_enable_clk(&clk_arry[i],
- i, false);
+ if (rc && i) {
+ msm_dss_enable_clk(&clk_arry[i - 1],
+ i - 1, false);
break;
}
}
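The hunk above corrects the error unwind in msm_dss_enable_clk(): when enabling clock i fails, only clocks 0 through i-1 (the ones that were actually turned on) are rolled back, and nothing is disabled if the very first clock fails. A minimal sketch of the same unwind pattern on bare struct clk pointers; the example_* name is hypothetical and not part of the driver.

#include <linux/clk.h>

/* Enable an array of clocks; on failure, undo only what was enabled. */
static int example_enable_clk_array(struct clk **clks, int num_clk)
{
	int i, rc = 0;

	for (i = 0; i < num_clk; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			/* roll back clocks [0, i); clock i never came up */
			while (--i >= 0)
				clk_disable_unprepare(clks[i]);
			break;
		}
	}

	return rc;
}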
@@ -164,7 +159,7 @@
"clock-names", i,
&clock_name);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
@@ -176,13 +171,13 @@
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
- dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
index bc07381..e6b5c77 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DPU_IO_UTIL_H__
#define __DPU_IO_UTIL_H__
-#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -22,12 +13,6 @@
#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
-struct dss_gpio {
- unsigned int gpio;
- unsigned int value;
- char gpio_name[32];
-};
-
enum dss_clk_type {
DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
DSS_CLK_PCLK,
@@ -42,8 +27,6 @@
};
struct dss_module_power {
- unsigned int num_gpio;
- struct dss_gpio *gpio_config;
unsigned int num_clk;
struct dss_clk *clk_config;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644
index d5e6ce0..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev) {
- pr_err("invalid device handles\n");
- return;
- }
-
- dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int rc;
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- rc = dpu_core_irq_postinstall(dpu_kms);
-
- return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644
index 3e147f7..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask: enable status of MDSS level interrupt
- * @domain: interrupt domain of this controller
- */
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: 0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev: pointer to kms context
- * @return: none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms: pointer to kms context
- * @return: interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 74cc204..58b0485 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1,27 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-#include <drm/drm_crtc.h>
#include <linux/debugfs.h>
-#include <linux/of_irq.h>
#include <linux/dma-buf.h>
+#include <linux/of_irq.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -56,7 +47,7 @@
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
static int dpu_kms_hw_init(struct msm_kms *kms);
-static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
static unsigned long dpu_iomap_size(struct platform_device *pdev,
const char *name)
@@ -81,7 +72,7 @@
struct dpu_danger_safe_status status;
int i;
- if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
@@ -138,46 +129,27 @@
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_danger);
- dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- dpu_kms->debugfs_danger = debugfs_create_dir("danger",
- parent);
- if (!dpu_kms->debugfs_danger) {
- DPU_ERROR("failed to create danger debugfs\n");
- return -EINVAL;
- }
+ struct dentry *entry = debugfs_create_dir("danger", parent);
- debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
- debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
-
- return 0;
}
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
- struct dpu_debugfs_regset32 *regset;
- struct dpu_kms *dpu_kms;
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
- if (!s || !s->private)
- return 0;
-
- regset = s->private;
-
- dpu_kms = regset->dpu_kms;
- if (!dpu_kms || !dpu_kms->mmio)
+ if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
@@ -235,72 +207,36 @@
}
}
-void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent, struct dpu_debugfs_regset32 *regset)
{
if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
- return NULL;
+ return;
/* make sure offset is a multiple of 4 */
regset->offset = round_down(regset->offset, 4);
- return debugfs_create_file(name, mode, parent,
- regset, &dpu_fops_regset32);
+ debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}
-static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- void *p;
- int rc;
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
- p = dpu_hw_util_get_log_mask_ptr();
-
- if (!dpu_kms || !p)
+ if (!p)
return -EINVAL;
- dpu_kms->debugfs_root = debugfs_create_dir("debug",
- dpu_kms->dev->primary->debugfs_root);
- if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
- DRM_ERROR("debugfs create_dir failed %ld\n",
- PTR_ERR(dpu_kms->debugfs_root));
- return PTR_ERR(dpu_kms->debugfs_root);
- }
+ entry = debugfs_create_dir("debug", minor->debugfs_root);
- rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
- if (rc) {
- DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
- return rc;
- }
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
- /* allow root to be NULL */
- debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
- (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
-
- rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
- if (rc) {
- DPU_ERROR("failed to init perf %d\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
- /* don't need to NULL check debugfs_root */
- if (dpu_kms) {
- dpu_debugfs_vbif_destroy(dpu_kms);
- dpu_debugfs_danger_destroy(dpu_kms);
- dpu_debugfs_core_irq_destroy(dpu_kms);
- debugfs_remove_recursive(dpu_kms->debugfs_root);
- }
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
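The debugfs rework above follows the kernel-wide convention that debugfs registration is best-effort: the dentry returned by debugfs_create_dir() is passed straight to the file helpers without error checking, and the per-minor dpu_kms_debugfs_init() hook replaces the driver-private debugfs_root/debugfs_danger bookkeeping. A small sketch of that style, with hypothetical example names:

#include <linux/debugfs.h>

static void example_debugfs_init(struct dentry *parent, u32 *mask)
{
	/* no error check needed: debugfs_create_*() tolerates error pointers */
	struct dentry *dir = debugfs_create_dir("example", parent);

	debugfs_create_x32("hw_log_mask", 0600, dir, mask);
}

Because nothing below the DRM minor's debugfs root is stored in dpu_kms anymore, the directory goes away with the minor itself, which is why the old _dpu_debugfs_destroy() path could be dropped.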
@@ -314,13 +250,42 @@
dpu_crtc_vblank(crtc, false);
}
+static void dpu_kms_enable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+}
+
+static void dpu_kms_disable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ ktime_t vsync_time;
+
+ if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
+ return vsync_time;
+ }
+
+ return ktime_get();
+}
+
static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
+ int i;
if (!kms)
return;
@@ -330,11 +295,28 @@
if (!dev || !dev->dev_private)
return;
priv = dev->dev_private;
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc != NULL)
+ /* Call prepare_commit for all affected encoders */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
dpu_encoder_prepare_commit(encoder);
+ }
+ }
+}
+
+static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
}
/*
@@ -344,59 +326,31 @@
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
/* Forward this enable call to the commit hook */
if (funcs && funcs->commit)
funcs->commit(encoder);
- if (crtc && crtc->state->active) {
- trace_dpu_kms_enc_enable(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
- }
-}
-
-static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- /* If modeset is required, kickoff is run in encoder_enable */
- if (drm_atomic_crtc_needs_modeset(crtc_state))
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_for_each_crtc(crtc, dev) {
+ if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
continue;
- if (crtc->state->active) {
- trace_dpu_kms_commit(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
- }
+ trace_dpu_kms_enc_enable(DRMID(crtc));
}
}
-static void dpu_kms_complete_commit(struct msm_kms *kms,
- struct drm_atomic_state *old_state)
+static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- struct dpu_kms *dpu_kms;
- struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
- int i;
-
- if (!kms || !old_state)
- return;
- dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
- return;
- priv = dpu_kms->dev->dev_private;
DPU_ATRACE_BEGIN("kms_complete_commit");
- for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
- dpu_crtc_complete_commit(crtc, old_crtc_state);
-
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_crtc_complete_commit(crtc);
DPU_ATRACE_END("kms_complete_commit");
}
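With dpu_kms_commit() and wait_for_crtc_commit_done gone, the commit path is split into finer-grained hooks that operate on a CRTC mask instead of a drm_atomic_state. A rough sketch of the order in which the msm atomic code is expected to drive them, assuming every hook is implemented as dpu does here; the real sequencing (including async commits and the vsync_time-based deferral) lives in msm_atomic.c:

static void example_commit_tail(struct msm_kms *kms,
				struct drm_atomic_state *state,
				unsigned int crtc_mask)
{
	kms->funcs->enable_commit(kms);			/* e.g. runtime-PM get */
	kms->funcs->prepare_commit(kms, state);
	/* ... drm_atomic_helper commit of modesets and planes ... */
	kms->funcs->flush_commit(kms, crtc_mask);	/* kick off the frame */
	kms->funcs->wait_flush(kms, crtc_mask);		/* wait for HW to latch */
	kms->funcs->complete_commit(kms, crtc_mask);
	kms->funcs->disable_commit(kms);		/* e.g. runtime-PM put */
}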
@@ -442,35 +396,47 @@
}
}
-static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_kms_wait_for_commit_done(kms, crtc);
+}
+
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
- int i, rc;
+ int i, rc = 0;
+
+ if (!(priv->dsi[0] || priv->dsi[1]))
+ return rc;
/*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
- if (IS_ERR_OR_NULL(encoder)) {
+ if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
- return;
+ return PTR_ERR(encoder);
}
priv->encoders[priv->num_encoders++] = encoder;
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i]) {
- DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
- return;
- }
+ if (!priv->dsi[i])
+ continue;
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
- continue;
+ break;
}
}
+
+ return rc;
}
/**
@@ -481,16 +447,16 @@
* @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success
*/
-static void _dpu_kms_setup_displays(struct drm_device *dev,
+static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
- _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
-
/**
* Extend this function to initialize other
* types of displays
*/
+
+ return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -531,19 +497,15 @@
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
- int primary_planes_idx = 0, i, ret;
+ int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
dev = dpu_kms->dev;
priv = dev->dev_private;
catalog = dpu_kms->catalog;
@@ -552,20 +514,30 @@
* Create encoder and query display drivers to create
* bridges and connectors
*/
- _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ if (ret)
+ goto fail;
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
- /* Create the planes */
+ /* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
- bool primary = true;
+ enum drm_plane_type type;
- if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
- || primary_planes_idx >= max_crtc_count)
- primary = false;
+ if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
+ && cursor_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
- plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
- (1UL << max_crtc_count) - 1, 0);
+ DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
+ type, catalog->sspp[i].features,
+ catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1, 0);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -573,7 +545,9 @@
}
priv->planes[priv->num_planes++] = plane;
- if (primary)
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor_planes[cursor_planes_idx++] = plane;
+ else if (type == DRM_PLANE_TYPE_PRIMARY)
primary_planes[primary_planes_idx++] = plane;
}
@@ -581,7 +555,7 @@
/* Create one CRTC per encoder */
for (i = 0; i < max_crtc_count; i++) {
- crtc = dpu_crtc_init(dev, primary_planes[i]);
+ crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
goto fail;
@@ -599,28 +573,6 @@
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- struct drm_device *dev;
- int rc;
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
- dev = dpu_kms->dev;
-
- rc = _dpu_debugfs_init(dpu_kms);
- if (rc)
- DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
- return rc;
-}
-#endif
-
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder)
{
@@ -640,12 +592,7 @@
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
- if (dpu_kms->power_event)
- dpu_power_handle_unregister_event(
- &dpu_kms->phandle, dpu_kms->power_event);
-
/* safe to call these more than once during shutdown */
- _dpu_debugfs_destroy(dpu_kms);
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
@@ -665,11 +612,6 @@
dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
- if (dpu_kms->core_client)
- dpu_power_client_destroy(&dpu_kms->phandle,
- dpu_kms->core_client);
- dpu_kms->core_client = NULL;
-
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -678,6 +620,10 @@
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
+ if (dpu_kms->hw_mdp)
+ dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
+ dpu_kms->hw_mdp = NULL;
+
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
@@ -694,131 +640,9 @@
dpu_kms = to_dpu_kms(kms);
- dpu_dbg_destroy();
_dpu_kms_hw_destroy(dpu_kms);
}
-static int dpu_kms_pm_suspend(struct device *dev)
-{
- struct drm_device *ddev;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_atomic_state *state;
- struct dpu_kms *dpu_kms;
- int ret = 0, num_crtcs = 0;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- /* disable hot-plug polling */
- drm_kms_helper_poll_disable(ddev);
-
- /* acquire modeset lock(s) */
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- DPU_ATRACE_BEGIN("kms_pm_suspend");
-
- ret = drm_modeset_lock_all_ctx(ddev, &ctx);
- if (ret)
- goto unlock;
-
- /* save current state for resume */
- if (dpu_kms->suspend_state)
- drm_atomic_state_put(dpu_kms->suspend_state);
- dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
- if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
- DRM_ERROR("failed to back up suspend state\n");
- dpu_kms->suspend_state = NULL;
- goto unlock;
- }
-
- /* create atomic state to disable all CRTCs */
- state = drm_atomic_state_alloc(ddev);
- if (IS_ERR_OR_NULL(state)) {
- DRM_ERROR("failed to allocate crtc disable state\n");
- goto unlock;
- }
-
- state->acquire_ctx = &ctx;
-
- /* check for nothing to do */
- if (num_crtcs == 0) {
- DRM_DEBUG("all crtcs are already in the off state\n");
- drm_atomic_state_put(state);
- goto suspended;
- }
-
- /* commit the "disable all" state */
- ret = drm_atomic_commit(state);
- if (ret < 0) {
- DRM_ERROR("failed to disable crtcs, %d\n", ret);
- drm_atomic_state_put(state);
- goto unlock;
- }
-
-suspended:
- dpu_kms->suspend_block = true;
-
-unlock:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- DPU_ATRACE_END("kms_pm_suspend");
- return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
- struct drm_device *ddev;
- struct dpu_kms *dpu_kms;
- int ret;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- DPU_ATRACE_BEGIN("kms_pm_resume");
-
- drm_mode_config_reset(ddev);
-
- drm_modeset_lock_all(ddev);
-
- dpu_kms->suspend_block = false;
-
- if (dpu_kms->suspend_state) {
- dpu_kms->suspend_state->acquire_ctx =
- ddev->mode_config.acquire_ctx;
- ret = drm_atomic_commit(dpu_kms->suspend_state);
- if (ret < 0) {
- DRM_ERROR("failed to restore state, %d\n", ret);
- drm_atomic_state_put(dpu_kms->suspend_state);
- }
- dpu_kms->suspend_state = NULL;
- }
- drm_modeset_unlock_all(ddev);
-
- /* enable hot-plug polling */
- drm_kms_helper_poll_enable(ddev);
-
- DPU_ATRACE_END("kms_pm_resume");
- return 0;
-}
-
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
@@ -847,23 +671,44 @@
encoder->base.id, rc);
}
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
- .irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
+ .enable_commit = dpu_kms_enable_commit,
+ .disable_commit = dpu_kms_disable_commit,
+ .vsync_time = dpu_kms_vsync_time,
.prepare_commit = dpu_kms_prepare_commit,
- .commit = dpu_kms_commit,
+ .flush_commit = dpu_kms_flush_commit,
+ .wait_flush = dpu_kms_wait_flush,
.complete_commit = dpu_kms_complete_commit,
- .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
- .pm_suspend = dpu_kms_pm_suspend,
- .pm_resume = dpu_kms_pm_resume,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
@@ -871,23 +716,20 @@
#endif
};
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
- dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
-static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
+ if (!dpu_kms->base.aspace)
+ return;
+
mmu = dpu_kms->base.aspace->mmu;
mmu->funcs->detach(mmu, (const char **)iommu_ports,
ARRAY_SIZE(iommu_ports));
msm_gem_address_space_put(dpu_kms->base.aspace);
- return 0;
+ dpu_kms->base.aspace = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
@@ -900,28 +742,26 @@
if (!domain)
return 0;
+ domain->geometry.aperture_start = 0x1000;
+ domain->geometry.aperture_end = 0xffffffff;
+
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
- ret = PTR_ERR(aspace);
- goto fail;
+ iommu_domain_free(domain);
+ return PTR_ERR(aspace);
}
- dpu_kms->base.aspace = aspace;
-
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
DPU_ERROR("failed to attach iommu %d\n", ret);
msm_gem_address_space_put(aspace);
- goto fail;
+ return ret;
}
+ dpu_kms->base.aspace = aspace;
return 0;
-fail:
- _dpu_kms_mmu_destroy(dpu_kms);
-
- return ret;
}
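The simplified IOMMU bring-up above frees the domain directly when the address space cannot be created and only publishes base.aspace once the MMU attach has succeeded, so the destroy path can key off a NULL aspace. A hedged sketch of that ordering (kernel context and the msm headers assumed; the example_* name, the port-list parameter, and the omitted aperture setup are illustrative, not driver code):

static int example_mmu_init(struct device *dev, struct msm_kms *kms,
			    const char **ports, int nports)
{
	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
	struct msm_gem_address_space *aspace;
	int ret;

	if (!domain)
		return 0;		/* no IOMMU available: not an error */

	aspace = msm_gem_address_space_create(dev, domain, "example");
	if (IS_ERR(aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, ports, nports);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ret;
	}

	kms->aspace = aspace;		/* publish only on full success */
	return 0;
}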
static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
@@ -949,17 +789,6 @@
return clk_get_rate(clk->clk);
}
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
- struct dpu_kms *dpu_kms = usr;
-
- if (!dpu_kms)
- return;
-
- if (event_type == DPU_POWER_EVENT_POST_ENABLE)
- dpu_vbif_init_memtypes(dpu_kms);
-}
-
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
@@ -969,28 +798,24 @@
if (!kms) {
DPU_ERROR("invalid kms\n");
- goto end;
+ return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
- goto end;
- }
-
- rc = dpu_dbg_init(&dpu_kms->pdev->dev);
- if (rc) {
- DRM_ERROR("failed to init dpu dbg: %d\n", rc);
- goto end;
+ return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
- goto dbg_destroy;
+ return rc;
}
+ atomic_set(&dpu_kms->bandwidth_ref, 0);
+
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
@@ -1026,20 +851,9 @@
dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
- dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
- "core");
- if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
- rc = PTR_ERR(dpu_kms->core_client);
- if (!dpu_kms->core_client)
- rc = -EINVAL;
- DPU_ERROR("dpu power client create failed: %d\n", rc);
- dpu_kms->core_client = NULL;
- goto error;
- }
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_kms_core_hw_rev_init(dpu_kms);
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
@@ -1053,8 +867,6 @@
goto power_error;
}
- dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1065,8 +877,7 @@
goto power_error;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
- dpu_kms->dev);
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
@@ -1074,11 +885,10 @@
dpu_kms->rm_init = true;
- dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
- if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
+ dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
- if (!dpu_kms->hw_mdp)
- rc = -EINVAL;
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
goto power_error;
@@ -1100,7 +910,6 @@
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
- &dpu_kms->phandle,
_dpu_kms_get_clk(dpu_kms, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
@@ -1115,16 +924,6 @@
goto hw_intr_init_err;
}
- /*
- * _dpu_kms_drm_obj_init should create the DRM related objects
- * i.e. CRTCs, planes, encoders, connectors and so forth
- */
- rc = _dpu_kms_drm_obj_init(dpu_kms);
- if (rc) {
- DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
- }
-
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -1142,13 +941,16 @@
dev->mode_config.allow_fb_modifiers = true;
/*
- * Handle (re)initializations during power enable
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
*/
- dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
- dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle,
- DPU_POWER_EVENT_POST_ENABLE,
- dpu_kms_handle_power_event, dpu_kms, "kms");
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
@@ -1162,9 +964,7 @@
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
- dpu_dbg_destroy();
-end:
+
return rc;
}
@@ -1212,8 +1012,6 @@
return ret;
}
- dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
platform_set_drvdata(pdev, dpu_kms);
msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1233,7 +1031,6 @@
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
- dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
@@ -1269,19 +1066,13 @@
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, false);
- if (rc)
- DPU_ERROR("resource disable failed: %d\n", rc);
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
-exit:
return rc;
}
@@ -1290,27 +1081,27 @@
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, true);
- if (rc)
- DPU_ERROR("resource enable failed: %d\n", rc);
+ dpu_vbif_init_memtypes(dpu_kms);
-exit:
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
+
return rc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 66d4666..4c889aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -1,37 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __DPU_KMS_H__
#define __DPU_KMS_H__
+#include <drm/drm_drv.h>
+
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
-#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
+#include "dpu_io_util.h"
#include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -75,9 +64,6 @@
#define DPU_NAME_SIZE 12
-/* timeout in frames waiting for frame done */
-#define DPU_FRAME_DONE_TIMEOUT 60
-
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
@@ -104,7 +90,6 @@
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
- struct dentry *debugfs_file;
};
struct dpu_kms {
@@ -113,15 +98,6 @@
int core_rev;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle phandle;
- struct dpu_power_client *core_client;
- struct dpu_power_event *power_event;
-
- /* directory entry for debugfs */
- struct dentry *debugfs_root;
- struct dentry *debugfs_danger;
- struct dentry *debugfs_vbif;
-
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +111,6 @@
struct dpu_core_perf perf;
- /* saved atomic state during system suspend */
- struct drm_atomic_state *suspend_state;
- bool suspend_block;
-
struct dpu_rm rm;
bool rm_init;
@@ -150,6 +122,14 @@
struct platform_device *pdev;
bool rpm_enabled;
struct dss_module_power mp;
+
+ /* reference count bandwidth requests, so we know when we can
+ * release bandwidth. Each atomic update increments, and frame-
+ * done event decrements. Additionally, for video mode, the
+ * reference is incremented when crtc is enabled, and decremented
+ * when disabled.
+ */
+ atomic_t bandwidth_ref;
};
struct vsync_info {
@@ -164,33 +144,6 @@
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
- if (!ddev_to_msm_kms(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- * suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
- if (!dpu_kms_is_suspend_state(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
* Debugfs functions - extra helper functions for debugfs support
*
* Main debugfs documentation is located at,
@@ -243,12 +196,8 @@
* @mode: File mode within debugfs
* @parent: Parent directory entry within debugfs, can be NULL
* @regset: Pointer to persistent register block definition
- *
- * Return: dentry pointer for newly created file, use either debugfs_remove()
- * or debugfs_remove_recursive() (on a parent directory) to remove the
- * file
*/
-void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent, struct dpu_debugfs_regset32 *regset);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 9e533b8..29705e7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -3,26 +3,73 @@
* Copyright (c) 2018, The Linux Foundation
*/
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/chained_irq.h>
#include "dpu_kms.h"
+#include <linux/interconnect.h>
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
#define HW_INTR_STATUS 0x0010
+/* Max BW defined in KBps */
+#define MAX_BW 6800000
+
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
unsigned long mmio_len;
- u32 hwversion;
struct dss_module_power mp;
struct dpu_irq_controller irq_controller;
+ struct icc_path *path[2];
+ u32 num_paths;
};
-static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
+ struct dpu_mdss *dpu_mdss)
{
- struct dpu_mdss *dpu_mdss = arg;
+ struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ dpu_mdss->path[0] = path0;
+ dpu_mdss->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ dpu_mdss->path[1] = path1;
+ dpu_mdss->num_paths++;
+ }
+
+ return 0;
+}
+
+static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ int i;
+ u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
+
+ for (i = 0; i < dpu_mdss->num_paths; i++)
+ icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
+}
+
+static void dpu_mdss_irq(struct irq_desc *desc)
+{
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts;
+ chained_irq_enter(chip, desc);
+
interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
while (interrupts) {
@@ -34,20 +81,20 @@
hwirq);
if (mapping == 0) {
DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
- return IRQ_NONE;
+ break;
}
rc = generic_handle_irq(mapping);
if (rc < 0) {
DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
hwirq, mapping, rc);
- return IRQ_NONE;
+ break;
}
interrupts &= ~(1 << hwirq);
}
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
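The MDSS interrupt is converted from a regular devm-requested handler into a chained flow handler that demultiplexes the status register into per-bit virtual IRQs looked up in the irq_domain. A compact sketch of that pattern; struct example_intc, its fields and the register offset are illustrative stand-ins for dpu_mdss:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

struct example_intc {
	void __iomem *base;		/* MDSS register space */
	struct irq_domain *domain;	/* hosts the demuxed interrupts */
};

#define EXAMPLE_INTR_STATUS	0x0010	/* mirrors HW_INTR_STATUS above */

static void example_chained_handler(struct irq_desc *desc)
{
	struct example_intc *intc = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(chip, desc);	/* mask/ack the parent as needed */

	pending = readl_relaxed(intc->base + EXAMPLE_INTR_STATUS);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_find_mapping(intc->domain, hwirq));

	chained_irq_exit(chip, desc);	/* eoi/unmask the parent */
}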
static void dpu_mdss_irq_mask(struct irq_data *irqd)
@@ -78,16 +125,16 @@
.irq_unmask = dpu_mdss_irq_unmask,
};
+static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
+
static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct dpu_mdss *dpu_mdss = domain->host_data;
- int ret;
+ irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
- ret = irq_set_chip_data(irq, dpu_mdss);
-
- return ret;
+ return irq_set_chip_data(irq, dpu_mdss);
}
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
@@ -115,13 +162,12 @@
return 0;
}
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
- return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
@@ -129,6 +175,8 @@
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
+ dpu_mdss_icc_request_bw(mdss);
+
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (ret)
DPU_ERROR("clock enable failed, ret:%d\n", ret);
@@ -140,12 +188,15 @@
{
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
- int ret;
+ int ret, i;
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (ret)
DPU_ERROR("clock disable failed, ret:%d\n", ret);
+ for (i = 0; i < dpu_mdss->num_paths; i++)
+ icc_set_bw(dpu_mdss->path[i], 0, 0);
+
return ret;
}
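dpu_mdss now votes display bandwidth through the interconnect framework: the optional "mdp0-mem"/"mdp1-mem" paths are looked up at bind time, a vote is cast before the clocks come up, and the vote is dropped to zero on disable. A simplified single-path sketch of that lifecycle (the patch itself splits the average across the available paths and converts only the peak value with kBps_to_icc()):

#include <linux/interconnect.h>

#define EXAMPLE_MAX_BW	6800000		/* KBps, mirrors MAX_BW above */

static int example_vote_bus(struct device *dev)
{
	struct icc_path *path = of_icc_get(dev, "mdp0-mem");

	/* NULL means no interconnect is described in DT: treat as optional */
	if (IS_ERR_OR_NULL(path))
		return PTR_ERR_OR_ZERO(path);

	icc_set_bw(path, kBps_to_icc(EXAMPLE_MAX_BW),
		   kBps_to_icc(EXAMPLE_MAX_BW));

	/* ... hardware is active ... */

	icc_set_bw(path, 0, 0);		/* drop the vote when idle */
	icc_put(path);
	return 0;
}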
@@ -155,17 +206,23 @@
struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ int irq;
+ int i;
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
-
+ irq = platform_get_irq(pdev, 0);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
+ for (i = 0; i < dpu_mdss->num_paths; i++)
+ icc_put(dpu_mdss->path[i]);
+
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
-
- pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}
@@ -183,6 +240,7 @@
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret = 0;
+ int irq;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
@@ -201,6 +259,10 @@
}
dpu_mdss->mmio_len = resource_size(res);
+ ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+ if (ret)
+ return ret;
+
mp = &dpu_mdss->mp;
ret = msm_dss_parse_clock(pdev, mp);
if (ret) {
@@ -215,20 +277,18 @@
if (ret)
goto irq_domain_error;
- ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
- if (ret) {
- DPU_ERROR("failed to init irq: %d\n", ret);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
goto irq_error;
- }
+
+ irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
+ dpu_mdss);
+
+ priv->mdss = &dpu_mdss->base;
pm_runtime_enable(dev->dev);
- pm_runtime_get_sync(dev->dev);
- dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
- pm_runtime_put_sync(dev->dev);
-
- priv->mdss = &dpu_mdss->base;
+ dpu_mdss_icc_request_bw(priv->mdss);
return ret;
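Unlike devm_request_irq(), a chained flow handler is not devres-managed, which is why the bind path above installs it with irq_set_chained_handler_and_data() and the destroy path detaches it explicitly before tearing down the irq_domain. A minimal sketch of that pairing, reusing the hypothetical example_chained_handler from the earlier sketch:

#include <linux/irq.h>
#include <linux/platform_device.h>

static void example_chained_handler(struct irq_desc *desc);	/* sketched above */

static int example_bind_irq(struct platform_device *pdev, void *demux_data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	irq_set_chained_handler_and_data(irq, example_chained_handler,
					 demux_data);
	return 0;
}

static void example_unbind_irq(struct platform_device *pdev)
{
	/* detach the handler explicitly; nothing frees it automatically */
	irq_set_chained_handler_and_data(platform_get_irq(pdev, 0), NULL, NULL);
}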
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 4ac2b0c..58d5acb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -21,6 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
@@ -93,8 +87,6 @@
enum dpu_sspp pipe;
uint32_t features; /* capabilities from catalog */
- uint32_t nformats;
- uint32_t formats[64];
struct dpu_hw_pipe *pipe_hw;
struct dpu_hw_pipe_cfg pipe_cfg;
@@ -119,30 +111,21 @@
bool debugfs_default_scale;
};
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_QCOM_COMPRESSED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
{
- struct msm_drm_private *priv;
+ struct msm_drm_private *priv = plane->dev->dev_private;
- if (!plane || !plane->dev)
- return NULL;
- priv = plane->dev->dev_private;
- if (!priv)
- return NULL;
return to_dpu_kms(priv->kms);
}
-static bool dpu_plane_enabled(struct drm_plane_state *state)
-{
- return state && state->fb && state->crtc;
-}
-
-static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
-{
- return state && state->crtc;
-}
-
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
@@ -150,7 +133,7 @@
* @src_wdith: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
@@ -158,7 +141,7 @@
u32 fixed_buff_size;
u32 total_fl;
- if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ if (!fmt || !plane->state || !src_width || !fmt->bpp) {
DPU_ERROR("invalid arguments\n");
return 0;
}
@@ -168,7 +151,7 @@
fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- if (!dpu_plane_enabled(tmp->base.state))
+ if (!tmp->base.state->visible)
continue;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
@@ -239,26 +222,11 @@
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u64 qos_lut;
u32 total_fl = 0, lut_usage;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments plane %d fb %d\n",
- plane != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
@@ -300,24 +268,10 @@
static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u32 danger_lut, safe_lut;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
danger_lut = pdpu->catalog->perf.danger_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
@@ -371,21 +325,7 @@
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
- struct dpu_plane *pdpu;
-
- if (!plane) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
- return;
- }
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
@@ -421,37 +361,6 @@
&pdpu->pipe_qos_cfg);
}
-int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
- struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->is_rt_pipe)
- goto end;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-end:
- return 0;
-}
-
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
@@ -460,29 +369,9 @@
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct drm_crtc *crtc)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev || !crtc) {
- DPU_ERROR("invalid arguments plane %d crtc %d\n",
- plane != 0, crtc != 0);
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
@@ -490,7 +379,7 @@
ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
- ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
ot_params.rd = true;
@@ -504,28 +393,9 @@
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
@@ -543,60 +413,16 @@
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static int _dpu_plane_get_aspace(
- struct dpu_plane *pdpu,
- struct dpu_plane_state *pstate,
- struct msm_gem_address_space **aspace)
-{
- struct dpu_kms *kms;
-
- if (!pdpu || !pstate || !aspace) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- kms = _dpu_plane_get_kms(&pdpu->base);
- if (!kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- *aspace = kms->base.aspace;
-
- return 0;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_plane_state *pstate,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
- struct msm_gem_address_space *aspace = NULL;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
int ret;
- if (!plane || !pstate || !pipe_cfg || !fb) {
- DPU_ERROR(
- "invalid arg(s), plane %d state %d cfg %d fb %d\n",
- plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
- return;
- }
-
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
- return;
- }
-
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
@@ -620,15 +446,6 @@
{
uint32_t i;
- if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
- !chroma_subsmpl_v) {
- DPU_ERROR(
- "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
- !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
- chroma_subsmpl_v);
- return;
- }
-
memset(scale_cfg, 0, sizeof(*scale_cfg));
memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
@@ -681,7 +498,7 @@
scale_cfg->enable = 1;
}
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
{
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
@@ -732,23 +549,9 @@
struct dpu_plane_state *pstate,
const struct dpu_format *fmt, bool color_fill)
{
- struct dpu_hw_pixel_ext *pe;
- uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
-
- if (!pdpu || !fmt || !pstate) {
- DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
- pdpu != 0, fmt != 0, pstate != 0);
- return;
- }
-
- pe = &pstate->pixel_ext;
+ const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
/* don't chroma subsample if decimating */
- chroma_subsmpl_h =
- drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
- chroma_subsmpl_v =
- drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
-
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
drm_rect_width(&pdpu->pipe_cfg.src_rect),
@@ -756,7 +559,7 @@
drm_rect_width(&pdpu->pipe_cfg.dst_rect),
drm_rect_height(&pdpu->pipe_cfg.dst_rect),
&pstate->scaler3_cfg, fmt,
- chroma_subsmpl_h, chroma_subsmpl_v);
+ info->hsub, info->vsub);
}
/**
@@ -770,21 +573,8 @@
uint32_t color, uint32_t alpha)
{
const struct dpu_format *fmt;
- const struct drm_plane *plane;
- struct dpu_plane_state *pstate;
-
- if (!pdpu || !pdpu->base.state) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- }
-
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
- return -EINVAL;
- }
-
- plane = &pdpu->base;
- pstate = to_dpu_plane_state(plane->state);
+ const struct drm_plane *plane = &pdpu->base;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -835,12 +625,7 @@
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
{
- struct dpu_plane_state *pstate;
-
- if (!drm_state)
- return;
-
- pstate = to_dpu_plane_state(drm_state);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
pstate->multirect_index = DPU_SSPP_RECT_SOLO;
pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
@@ -971,15 +756,6 @@
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !flush_sspp) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- pstate = to_dpu_plane_state(plane->state);
-
*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
}
@@ -990,10 +766,7 @@
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
- struct dma_fence *fence;
- struct msm_gem_address_space *aspace;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -1001,25 +774,15 @@
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
- return ret;
- }
-
/* cache aspace */
- pstate->aspace = aspace;
+ pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
* we can use msm_atomic_prepare_fb() instead of doing the
* implicit fence and fb prepare by hand here.
*/
- obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
- if (fence)
- drm_atomic_set_fence_for_plane(new_state, fence);
+ drm_gem_fb_prepare_fb(plane, new_state);
if (pstate->aspace) {
ret = msm_framebuffer_prepare(new_state->fb,
@@ -1076,33 +839,30 @@
drm_rect_equals(fb_rect, src);
}
-static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- int ret = 0;
- struct dpu_plane *pdpu;
- struct dpu_plane_state *pstate;
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
- uint32_t max_upscale = 1, max_downscale = 1;
uint32_t min_src_size, max_linewidth;
- int hscale = 1, vscale = 1;
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- ret = -EINVAL;
- goto exit;
+ if (state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
+
+ min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
+ pdpu->pipe_sblk->maxupscale << 16,
+ true, true);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ return ret;
}
-
- pdpu = to_dpu_plane(plane);
- pstate = to_dpu_plane_state(state);
-
- if (!pdpu->pipe_sblk) {
- DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
- ret = -EINVAL;
- goto exit;
- }
+ if (!state->visible)
+ return 0;
src.x1 = state->src_x >> 16;
src.y1 = state->src_y >> 16;
@@ -1116,25 +876,6 @@
max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
- if (pdpu->features & DPU_SSPP_SCALER) {
- max_downscale = pdpu->pipe_sblk->maxdwnscale;
- max_upscale = pdpu->pipe_sblk->maxupscale;
- }
- if (drm_rect_width(&src) < drm_rect_width(&dst))
- hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
- else
- hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
- if (drm_rect_height(&src) < drm_rect_height(&dst))
- vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
- else
- vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
-
- DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
- dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
-
- if (!dpu_plane_enabled(state))
- goto exit;
-
fmt = to_dpu_format(msm_framebuffer_format(state->fb));
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -1145,13 +886,13 @@
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_ERROR_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
- ret = -EINVAL;
+ return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -E2BIG;
+ return -E2BIG;
/* valid yuv image */
} else if (DPU_FORMAT_IS_YUV(fmt) &&
@@ -1160,41 +901,22 @@
drm_rect_height(&src) & 0x1)) {
DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -EINVAL;
+ return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
- ret = -EINVAL;
+ return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
- ret = -E2BIG;
-
- /* check scaler capability */
- } else if (hscale < 0 || vscale < 0) {
- DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
- DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
- ret = -E2BIG;
+ return -E2BIG;
}
-exit:
- return ret;
-}
-
-static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- if (!state->fb)
- return 0;
-
- DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
-
- return dpu_plane_sspp_atomic_check(plane, state);
+ return 0;
}
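
A minimal sketch (not part of the patch) of the scale bounds the rewritten atomic_check above hands to drm_atomic_helper_check_plane_state(); it assumes FRAC_16_16() is the usual ((mult) << 16) / (div) helper from the dpu headers and uses illustrative down/upscale ratios rather than real catalog values:

#include <drm/drm_atomic_helper.h>

/* Illustrative ratios only; the driver reads maxdwnscale/maxupscale from
 * pdpu->pipe_sblk instead of hard-coding them. */
#define EXAMPLE_MAXDWNSCALE	4
#define EXAMPLE_MAXUPSCALE	20

static int example_check_scaling(struct drm_plane_state *state,
				 const struct drm_crtc_state *crtc_state)
{
	/* 16.16 fixed point: 1/4 downscale = 0x4000, 20x upscale = 0x140000 */
	int min_scale = (1 << 16) / EXAMPLE_MAXDWNSCALE;
	int max_scale = EXAMPLE_MAXUPSCALE << 16;

	/*
	 * The helper clips src/dst against the CRTC, rejects out-of-range
	 * scale factors and sets state->visible, which is why the open-coded
	 * hscale/vscale checks and dpu_plane_enabled() tests could go away.
	 */
	return drm_atomic_helper_check_plane_state(state, crtc_state,
						   min_scale, max_scale,
						   true, true);
}
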
void dpu_plane_flush(struct drm_plane *plane)
@@ -1243,46 +965,16 @@
pdpu->is_error = error;
}
-static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
{
- uint32_t nplanes, src_flags;
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
- struct dpu_plane_state *old_pstate;
- const struct dpu_format *fmt;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- int ret, min_scale;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return -EINVAL;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return -EINVAL;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
-
- pstate = to_dpu_plane_state(state);
-
- old_pstate = to_dpu_plane_state(old_state);
-
- crtc = state->crtc;
- fb = state->fb;
- if (!crtc || !fb) {
- DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
- crtc != 0, fb != 0);
- return -EINVAL;
- }
- fmt = to_dpu_format(msm_framebuffer_format(fb));
- nplanes = fmt->num_planes;
+ uint32_t src_flags;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ const struct dpu_format *fmt =
+ to_dpu_format(msm_framebuffer_format(fb));
memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
@@ -1293,15 +985,6 @@
pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
- ret = drm_atomic_helper_check_plane_state(state, crtc->state, min_scale,
- pdpu->pipe_sblk->maxupscale << 16,
- true, false);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
- return ret;
- }
-
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
@@ -1322,7 +1005,7 @@
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
/* skip remaining processing on color fill */
- return 0;
+ return;
}
if (pdpu->pipe_hw->ops.setup_rects) {
@@ -1354,8 +1037,21 @@
pstate->multirect_mode);
if (pdpu->pipe_hw->ops.setup_format) {
+ unsigned int rotation;
+
src_flags = 0x0;
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ src_flags |= DPU_SSPP_FLIP_LR;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_flags |= DPU_SSPP_FLIP_UD;
+
/* update format */
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
pstate->multirect_index);
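
The reflect-to-flip mapping added above is what lets the plane advertise DRM_MODE_ROTATE_180 later in dpu_plane_init() even though the SSPP only has mirror bits. A small sketch (hypothetical helper name, not part of the patch) of the same translation in isolation, assuming the DPU_SSPP_FLIP_* flags used above:

#include <linux/types.h>
#include <drm/drm_blend.h>	/* drm_rotation_simplify() */

static u32 example_rotation_to_sspp_flags(unsigned int requested)
{
	u32 src_flags = 0;
	unsigned int rotation;

	/* e.g. DRM_MODE_ROTATE_180 simplifies to ROTATE_0 | REFLECT_X | REFLECT_Y */
	rotation = drm_rotation_simplify(requested,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);

	if (rotation & DRM_MODE_REFLECT_X)
		src_flags |= DPU_SSPP_FLIP_LR;
	if (rotation & DRM_MODE_REFLECT_Y)
		src_flags |= DPU_SSPP_FLIP_UD;

	return src_flags;
}
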
@@ -1393,30 +1089,13 @@
}
_dpu_plane_set_qos_remap(plane);
- return 0;
}
-static void _dpu_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
- pstate = to_dpu_plane_state(state);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
pstate->multirect_mode);
@@ -1432,31 +1111,17 @@
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
pdpu->is_error = false;
- state = plane->state;
DPU_DEBUG_PLANE(pdpu, "\n");
- if (!dpu_plane_sspp_enabled(state)) {
- _dpu_plane_atomic_disable(plane, old_state);
+ if (!state->visible) {
+ _dpu_plane_atomic_disable(plane);
} else {
- int ret;
-
- ret = dpu_plane_sspp_atomic_update(plane, old_state);
- /* atomic_check should have ensured that this doesn't fail */
- WARN_ON(ret < 0);
+ dpu_plane_sspp_atomic_update(plane);
}
}
@@ -1488,13 +1153,10 @@
mutex_destroy(&pdpu->lock);
- drm_plane_helper_disable(plane, NULL);
-
/* this will destroy the states as well */
drm_plane_cleanup(plane);
- if (pdpu->pipe_hw)
- dpu_hw_sspp_destroy(pdpu->pipe_hw);
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
kfree(pdpu);
}
@@ -1503,21 +1165,8 @@
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- return;
- }
-
- pstate = to_dpu_plane_state(state);
-
- /* remove ref count for frame buffers */
- if (state->fb)
- drm_framebuffer_put(state->fb);
-
- kfree(pstate);
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
@@ -1583,30 +1232,29 @@
}
#ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ if (!pdpu->is_rt_pipe)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
- int len = 0;
- char buf[40] = {'\0'};
+ int len;
+ char buf[40];
- if (!cfg)
- return -ENODEV;
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if (len < 0 || len >= sizeof(buf))
- return 0;
-
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
-
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
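
Both danger debugfs handlers now lean on generic helpers: simple_read_from_buffer() here and kstrtouint_from_user() in the write path below, instead of open-coded *ppos bookkeeping and copy_{to,from}_user() parsing. A self-contained sketch of that pattern with hypothetical names:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_flag_read(struct file *file, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	unsigned int *flag = file->private_data;
	char buf[16];
	int len = scnprintf(buf, sizeof(buf), "%u\n", *flag);

	/* handles *ppos, short reads and copy_to_user() failures */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static ssize_t example_flag_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	unsigned int *flag = file->private_data;
	int ret = kstrtouint_from_user(ubuf, count, 0, flag);

	return ret ? ret : count;
}
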
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1636,23 +1284,12 @@
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
int disable_panic;
- char buf[10];
+ int ret;
- if (!cfg)
- return -EFAULT;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
-
- if (kstrtoint(buf, 0, &disable_panic))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
@@ -1677,42 +1314,16 @@
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct dpu_kms *kms;
- struct msm_drm_private *priv;
- const struct dpu_sspp_sub_blks *sblk = 0;
- const struct dpu_sspp_cfg *cfg = 0;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
-
- if (pdpu && pdpu->pipe_hw)
- cfg = pdpu->pipe_hw->cap;
- if (cfg)
- sblk = cfg->sblk;
-
- if (!sblk)
- return 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+ const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
debugfs_create_dir(pdpu->pipe_name,
plane->dev->primary->debugfs_root);
- if (!pdpu->debugfs_root)
- return -ENOMEM;
-
/* don't error check these */
debugfs_create_x32("features", 0600,
pdpu->debugfs_root, &pdpu->features);
@@ -1774,25 +1385,11 @@
return 0;
}
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu;
-
- if (!plane)
- return;
- pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
#else
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
return 0;
}
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
#endif
static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1802,7 +1399,26 @@
static void dpu_plane_early_unregister(struct drm_plane *plane)
{
- _dpu_plane_destroy_debugfs(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+
+static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
+ if (format == qcom_compressed_supported_formats[i])
+ return true;
+ }
+ }
+
+ return false;
}
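
The new .format_mod_supported hook pairs with the modifier array handed to drm_universal_plane_init() further down; the DRM core builds the IN_FORMATS property from that array and calls this hook to filter format/modifier pairs. A sketch of what such a table looks like (hypothetical name; the patch itself uses supported_format_modifiers, defined elsewhere in the file):

#include <linux/types.h>
#include <drm/drm_fourcc.h>

/* The list passed to drm_universal_plane_init() must be terminated with
 * DRM_FORMAT_MOD_INVALID. */
static const uint64_t example_format_modifiers[] = {
	DRM_FORMAT_MOD_QCOM_COMPRESSED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};
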
static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1814,6 +1430,7 @@
.atomic_destroy_state = dpu_plane_destroy_state,
.late_register = dpu_plane_late_register,
.early_unregister = dpu_plane_early_unregister,
+ .format_mod_supported = dpu_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
@@ -1835,46 +1452,24 @@
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
- const struct dpu_format_extended *format_list;
+ const uint32_t *format_list;
struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *kms;
- enum drm_plane_type type;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
+ uint32_t num_formats;
int ret = -EINVAL;
- if (!dev) {
- DPU_ERROR("[%u]device is NULL\n", pipe);
- goto exit;
- }
-
- priv = dev->dev_private;
- if (!priv) {
- DPU_ERROR("[%u]private data is NULL\n", pipe);
- goto exit;
- }
-
- if (!priv->kms) {
- DPU_ERROR("[%u]invalid KMS reference\n", pipe);
- goto exit;
- }
- kms = to_dpu_kms(priv->kms);
-
- if (!kms->catalog) {
- DPU_ERROR("[%u]invalid catalog reference\n", pipe);
- goto exit;
- }
-
/* create and zero local structure */
pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
- goto exit;
+ return ERR_PTR(ret);
}
/* cache local stuff for later */
@@ -1909,30 +1504,18 @@
goto clean_sspp;
}
- if (!master_plane_id)
- format_list = pdpu->pipe_sblk->format_list;
- else
+ if (pdpu->is_virtual) {
format_list = pdpu->pipe_sblk->virt_format_list;
-
- pdpu->nformats = dpu_populate_formats(format_list,
- pdpu->formats,
- 0,
- ARRAY_SIZE(pdpu->formats));
-
- if (!pdpu->nformats) {
- DPU_ERROR("[%u]no valid formats for plane\n", pipe);
- goto clean_sspp;
+ num_formats = pdpu->pipe_sblk->virt_num_formats;
+	} else {
+ format_list = pdpu->pipe_sblk->format_list;
+ num_formats = pdpu->pipe_sblk->num_formats;
}
- if (pdpu->features & BIT(DPU_SSPP_CURSOR))
- type = DRM_PLANE_TYPE_CURSOR;
- else if (primary_plane)
- type = DRM_PLANE_TYPE_PRIMARY;
- else
- type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
- pdpu->formats, pdpu->nformats,
- NULL, type, NULL);
+ format_list, num_formats,
+ supported_format_modifiers, type, NULL);
if (ret)
goto clean_sspp;
@@ -1949,6 +1532,15 @@
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
@@ -1966,6 +1558,5 @@
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
kfree(pdpu);
-exit:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index f6fe6dd..4569497 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _DPU_PLANE_H_
@@ -28,23 +17,18 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @property_state: Local storage for msm_prop properties
- * @property_values: cached plane property values
* @aspace: pointer to address space for input/output buffers
- * @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
- * @scaler_check_state: indicates status of user provided pixel extension data
* @cdp_cfg: CDP configuration
*/
struct dpu_plane_state {
struct drm_plane_state base;
struct msm_gem_address_space *aspace;
- void *input_fence;
enum dpu_stage stage;
uint32_t multirect_index;
uint32_t multirect_mode;
@@ -107,12 +91,6 @@
void dpu_plane_flush(struct drm_plane *plane);
/**
- * dpu_plane_kickoff - final plane operations before commit kickoff
- * @plane: Pointer to drm plane structure
- */
-void dpu_plane_kickoff(struct drm_plane *plane);
-
-/**
* dpu_plane_set_error: enable/disable error condition
* @plane: pointer to drm_plane structure
*/
@@ -122,7 +100,7 @@
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
- * @primary_plane: true if this pipe is primary plane for crtc
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
@@ -130,7 +108,7 @@
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id);
/**
@@ -147,14 +125,6 @@
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
/**
- * dpu_plane_wait_input_fence - wait for input fence object
- * @plane: Pointer to DRM plane object
- * @wait_ms: Wait timeout value
- * Returns: Zero on success
- */
-int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
-
-/**
* dpu_plane_color_fill - enables color fill on plane
* @plane: Pointer to DRM plane object
* @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
@@ -164,12 +134,4 @@
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
-/**
- * dpu_plane_set_revalidate - sets revalidate flag which forces a full
- * validation of the plane properties in the next atomic check
- * @plane: Pointer to DRM plane object
- * @enable: Boolean to set/unset the flag
- */
-void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
-
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644
index a75eebc..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
- [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
- [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
- [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
- if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
- return data_bus_name[bus_id];
-
- return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
- u32 event_type)
-{
- struct dpu_power_event *event;
-
- list_for_each_entry(event, &phandle->event_list, list) {
- if (event->event_type & event_type)
- event->cb_fnc(event_type, event->usr);
- }
-}
-
-struct dpu_power_client *dpu_power_client_create(
- struct dpu_power_handle *phandle, char *client_name)
-{
- struct dpu_power_client *client;
- static u32 id;
-
- if (!client_name || !phandle) {
- pr_err("client name is null or invalid power data\n");
- return ERR_PTR(-EINVAL);
- }
-
- client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&phandle->phandle_lock);
- strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
- client->usecase_ndx = VOTE_INDEX_DISABLE;
- client->id = id;
- client->active = true;
- pr_debug("client %s created:%pK id :%d\n", client_name,
- client, id);
- id++;
- list_add(&client->list, &phandle->power_client_clist);
- mutex_unlock(&phandle->phandle_lock);
-
- return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client)
-{
- if (!client || !phandle) {
- pr_err("reg bus vote: invalid client handle\n");
- } else if (!client->active) {
- pr_err("dpu power deinit already done\n");
- kfree(client);
- } else {
- pr_debug("bus vote client %s destroyed:%pK id:%u\n",
- client->name, client, client->id);
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&client->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(client);
- }
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- phandle->dev = &pdev->dev;
-
- INIT_LIST_HEAD(&phandle->power_client_clist);
- INIT_LIST_HEAD(&phandle->event_list);
-
- mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- struct dpu_power_client *curr_client, *next_client;
- struct dpu_power_event *curr_event, *next_event;
-
- if (!phandle || !pdev) {
- pr_err("invalid input param\n");
- return;
- }
-
- mutex_lock(&phandle->phandle_lock);
- list_for_each_entry_safe(curr_client, next_client,
- &phandle->power_client_clist, list) {
- pr_err("client:%s-%d still registered with refcount:%d\n",
- curr_client->name, curr_client->id,
- curr_client->refcount);
- curr_client->active = false;
- list_del(&curr_client->list);
- }
-
- list_for_each_entry_safe(curr_event, next_event,
- &phandle->event_list, list) {
- pr_err("event:%d, client:%s still registered\n",
- curr_event->event_type,
- curr_event->client_name);
- curr_event->active = false;
- list_del(&curr_event->list);
- }
- mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, bool enable)
-{
- bool changed = false;
- u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
- struct dpu_power_client *client;
-
- if (!phandle || !pclient) {
- pr_err("invalid input argument\n");
- return -EINVAL;
- }
-
- mutex_lock(&phandle->phandle_lock);
- if (enable)
- pclient->refcount++;
- else if (pclient->refcount)
- pclient->refcount--;
-
- if (pclient->refcount)
- pclient->usecase_ndx = VOTE_INDEX_LOW;
- else
- pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
- list_for_each_entry(client, &phandle->power_client_clist, list) {
- if (client->usecase_ndx < VOTE_INDEX_MAX &&
- client->usecase_ndx > max_usecase_ndx)
- max_usecase_ndx = client->usecase_ndx;
- }
-
- if (phandle->current_usecase_ndx != max_usecase_ndx) {
- changed = true;
- prev_usecase_ndx = phandle->current_usecase_ndx;
- phandle->current_usecase_ndx = max_usecase_ndx;
- }
-
- pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
- __builtin_return_address(0), changed, max_usecase_ndx,
- pclient->name, pclient->id, enable, pclient->refcount);
-
- if (!changed)
- goto end;
-
- if (enable) {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_ENABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_ENABLE);
-
- } else {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_DISABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_DISABLE);
- }
-
-end:
- mutex_unlock(&phandle->phandle_lock);
- return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name)
-{
- struct dpu_power_event *event;
-
- if (!phandle) {
- pr_err("invalid power handle\n");
- return ERR_PTR(-EINVAL);
- } else if (!cb_fnc || !event_type) {
- pr_err("no callback fnc or event type\n");
- return ERR_PTR(-EINVAL);
- }
-
- event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
- if (!event)
- return ERR_PTR(-ENOMEM);
-
- event->event_type = event_type;
- event->cb_fnc = cb_fnc;
- event->usr = usr;
- strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
- event->active = true;
-
- mutex_lock(&phandle->phandle_lock);
- list_add(&event->list, &phandle->event_list);
- mutex_unlock(&phandle->phandle_lock);
-
- return event;
-}
-
-void dpu_power_handle_unregister_event(
- struct dpu_power_handle *phandle,
- struct dpu_power_event *event)
-{
- if (!phandle || !event) {
- pr_err("invalid phandle or event\n");
- } else if (!event->active) {
- pr_err("power handle deinit already done\n");
- kfree(event);
- } else {
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&event->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(event);
- }
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644
index 344f744..0000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
-
-#include "dpu_io_util.h"
-
-/* event will be triggered before power handler disable */
-#define DPU_POWER_EVENT_PRE_DISABLE 0x1
-
-/* event will be triggered after power handler disable */
-#define DPU_POWER_EVENT_POST_DISABLE 0x2
-
-/* event will be triggered before power handler enable */
-#define DPU_POWER_EVENT_PRE_ENABLE 0x4
-
-/* event will be triggered after power handler enable */
-#define DPU_POWER_EVENT_POST_ENABLE 0x8
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
- VOTE_INDEX_DISABLE,
- VOTE_INDEX_LOW,
- VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
- DPU_POWER_HANDLE_DBUS_ID_MNOC,
- DPU_POWER_HANDLE_DBUS_ID_LLCC,
- DPU_POWER_HANDLE_DBUS_ID_EBI,
- DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name: name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount: current refcount if multiple modules are using same
- * same client for enable/disable. Power module will
- * aggregate the refcount and vote accordingly for this
- * client.
- * @id: assigned during create. helps for debugging.
- * @list: list to attach power handle master list
- * @ab: arbitrated bandwidth for each bus client
- * @ib: instantaneous bandwidth for each bus client
- * @active: inidcates the state of dpu power handle
- */
-struct dpu_power_client {
- char name[MAX_CLIENT_NAME_LEN];
- short usecase_ndx;
- short refcount;
- u32 id;
- struct list_head list;
- u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to DPU_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
- char client_name[MAX_CLIENT_NAME_LEN];
- void (*cb_fnc)(u32 event_type, void *usr);
- void *usr;
- u32 event_type;
- struct list_head list;
- bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
- struct list_head power_client_clist;
- struct mutex phandle_lock;
- struct device *dev;
- u32 current_usecase_ndx;
- struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev: platform device to search the power resources
- * @pdata: power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev: platform device for power resources
- * @pdata: power handle containing the resources
- *
- * Return: error code.
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
- char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata: power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
- struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle: power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data base
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- * Clients can register for multiple events with a single register.
- * Any block with access to phandle can register for the event
- * notification.
- * @phandle: power handle containing the resources
- * @event_type: event type to register; refer DPU_POWER_HANDLE_EVENT_*
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback on event trigger
- *
- * Return: event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle: power handle containing the resources
- * @event: event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
- struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id: data bus identifier
- * Return: Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 13c0a36..ddc8412 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -1,163 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
-#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
-
-#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
-#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
-#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
-#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
- (t).num_comp_enc == (r).num_enc && \
- (t).num_intf == (r).num_intf)
-
-struct dpu_rm_topology_def {
- enum dpu_rm_topology_name top_name;
- int num_lm;
- int num_comp_enc;
- int num_intf;
- int num_ctl;
- int needs_split_display;
-};
-
-static const struct dpu_rm_topology_def g_top_table[] = {
- { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
- { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
- { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
- { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
-};
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->enc_id && (h)->enc_id != r)
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @top_ctrl: topology control preference from kernel client
- * @top: selected topology for the display
+ * @topology: selected topology for the display
* @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
- uint64_t top_ctrl;
- const struct dpu_rm_topology_def *topology;
+ struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
};
-/**
- * struct dpu_rm_rsvp - Use Case Reservation tagging structure
- * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
- * By using as a tag, rather than lists of pointers to HW blocks used
- * we can avoid some list management since we don't know how many blocks
- * of each type a given use case may require.
- * @list: List head for list of all reservations
- * @seq: Global RSVP sequence number for debugging, especially for
- * differentiating differenct allocations for same encoder.
- * @enc_id: Reservations are tracked by Encoder DRM object ID.
- * CRTCs may be connected to multiple Encoders.
- * An encoder or connector id identifies the display path.
- * @topology DRM<->HW topology use case
- */
-struct dpu_rm_rsvp {
- struct list_head list;
- uint32_t seq;
- uint32_t enc_id;
- enum dpu_rm_topology_name topology;
-};
/**
* struct dpu_rm_hw_blk - hardware block tracking list member
* @list: List head for list of all hardware blocks tracking items
- * @rsvp: Pointer to use case reservation if reserved by a client
- * @rsvp_nxt: Temporary pointer used during reservation to the incoming
- * request. Will be swapped into rsvp if proposal is accepted
- * @type: Type of hardware block this structure tracks
 * @id: Hardware ID number, within its own space, i.e. LM_X
- * @catalog: Pointer to the hardware catalog entry for this block
+ * @enc_id: Encoder id to which this blk is bound
* @hw: Pointer to the hardware register access object for this block
*/
struct dpu_rm_hw_blk {
struct list_head list;
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_rsvp *rsvp_nxt;
- enum dpu_hw_blk_type type;
uint32_t id;
+ uint32_t enc_id;
struct dpu_hw_blk *hw;
};
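
With the rsvp bookkeeping gone, ownership of a block is simply the encoder id stored in dpu_rm_hw_blk (0 meaning free), so RESERVED_BY_OTHER() above reduces to a comparison and releasing a reservation is just zeroing the tag (see _dpu_rm_release_reservation() later in this file). A stripped-down sketch of that model with hypothetical names:

#include <linux/types.h>

struct example_rm_blk {
	uint32_t id;
	uint32_t enc_id;	/* 0 when unreserved */
};

static bool example_reserved_by_other(const struct example_rm_blk *blk,
				      uint32_t enc_id)
{
	return blk->enc_id && blk->enc_id != enc_id;
}

static void example_release(struct example_rm_blk *blk, uint32_t enc_id)
{
	if (blk->enc_id == enc_id)
		blk->enc_id = 0;
}
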
-/**
- * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
- */
-enum dpu_rm_dbg_rsvp_stage {
- DPU_RM_STAGE_BEGIN,
- DPU_RM_STAGE_AFTER_CLEAR,
- DPU_RM_STAGE_AFTER_RSVPNEXT,
- DPU_RM_STAGE_FINAL
-};
-
-static void _dpu_rm_print_rsvps(
- struct dpu_rm *rm,
- enum dpu_rm_dbg_rsvp_stage stage)
-{
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- DPU_DEBUG("%d\n", stage);
-
- list_for_each_entry(rsvp, &rm->rsvps, list) {
- DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
- rsvp->enc_id, rsvp->topology);
- }
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (!blk->rsvp && !blk->rsvp_nxt)
- continue;
-
- DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
- (blk->rsvp) ? blk->rsvp->seq : 0,
- (blk->rsvp) ? blk->rsvp->enc_id : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
- blk->type, blk->id);
- }
- }
-}
-
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
-{
- return rm->hw_mdp;
-}
-
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology)
-{
- int i;
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
- return g_top_table[i].top_name;
-
- return DPU_RM_TOPOLOGY_NONE;
-}
-
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -188,15 +70,7 @@
i->blk = list_prepare_entry(i->blk, blk_list, list);
list_for_each_entry_continue(i->blk, blk_list, list) {
- struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
-
- if (i->blk->type != i->type) {
- DPU_ERROR("found incorrect block type %d on %d list\n",
- i->blk->type, i->type);
- return false;
- }
-
- if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ if (i->enc_id == i->blk->enc_id) {
i->hw = i->blk->hw;
DPU_DEBUG("found type %d id %d for enc %d\n",
i->type, i->blk->id, i->enc_id);
@@ -229,9 +103,6 @@
case DPU_HW_BLK_CTL:
dpu_hw_ctl_destroy(hw);
break;
- case DPU_HW_BLK_CDM:
- dpu_hw_cdm_destroy(hw);
- break;
case DPU_HW_BLK_PINGPONG:
dpu_hw_pingpong_destroy(hw);
break;
@@ -251,34 +122,18 @@
int dpu_rm_destroy(struct dpu_rm *rm)
{
-
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
enum dpu_hw_blk_type type;
- if (!rm) {
- DPU_ERROR("invalid rm\n");
- return -EINVAL;
- }
-
- list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
- list_del(&rsvp_cur->list);
- kfree(rsvp_cur);
- }
-
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
list) {
list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ _dpu_rm_hw_destroy(type, hw_cur->hw);
kfree(hw_cur);
}
}
- dpu_hw_mdp_destroy(rm->hw_mdp);
- rm->hw_mdp = NULL;
-
mutex_destroy(&rm->rm_lock);
return 0;
@@ -293,11 +148,8 @@
void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
- struct dpu_hw_mdp *hw_mdp;
void *hw;
- hw_mdp = rm->hw_mdp;
-
switch (type) {
case DPU_HW_BLK_LM:
hw = dpu_hw_lm_init(id, mmio, cat);
@@ -305,9 +157,6 @@
case DPU_HW_BLK_CTL:
hw = dpu_hw_ctl_init(id, mmio, cat);
break;
- case DPU_HW_BLK_CDM:
- hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
- break;
case DPU_HW_BLK_PINGPONG:
hw = dpu_hw_pingpong_init(id, mmio, cat);
break;
@@ -336,9 +185,9 @@
return -ENOMEM;
}
- blk->type = type;
blk->id = id;
blk->hw = hw;
+ blk->enc_id = 0;
list_add_tail(&blk->list, &rm->hw_blks[type]);
return 0;
@@ -346,13 +195,12 @@
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev)
+ void __iomem *mmio)
{
int rc, i;
enum dpu_hw_blk_type type;
- if (!rm || !cat || !mmio || !dev) {
+ if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
@@ -362,21 +210,9 @@
mutex_init(&rm->rm_lock);
- INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < DPU_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
- rm->dev = dev;
-
- /* Some of the sub-blocks require an mdptop to be created */
- rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
- if (IS_ERR_OR_NULL(rm->hw_mdp)) {
- rc = PTR_ERR(rm->hw_mdp);
- rm->hw_mdp = NULL;
- DPU_ERROR("failed: mdp hw not available\n");
- goto fail;
- }
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_lm_cfg *lm = &cat->mixer[i];
@@ -438,15 +274,6 @@
}
}
- for (i = 0; i < cat->cdm_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
- cat->cdm[i].id, &cat->cdm[i]);
- if (rc) {
- DPU_ERROR("failed: cdm hw not available\n");
- goto fail;
- }
- }
-
return 0;
fail:
@@ -455,12 +282,17 @@
return rc;
}
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+ return top->num_intf > 1;
+}
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
- * @rsvp: reservation currently being created
+ * @enc_id: encoder id requesting the allocation
* @reqs: proposed use case requirements
* @lm: proposed layer mixer, function checks if lm, and all other hardwired
* blocks connected to the lm (pp) is available and appropriate
@@ -472,7 +304,7 @@
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs,
struct dpu_rm_hw_blk *lm,
struct dpu_rm_hw_blk **pp,
@@ -499,7 +331,7 @@
}
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, rsvp)) {
+ if (RESERVED_BY_OTHER(lm, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
return false;
}
@@ -517,7 +349,7 @@
return false;
}
- if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ if (RESERVED_BY_OTHER(*pp, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
(*pp)->id);
return false;
@@ -526,10 +358,8 @@
return true;
}
-static int _dpu_rm_reserve_lms(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct dpu_rm_requirements *reqs)
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+ struct dpu_rm_requirements *reqs)
{
struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
@@ -538,14 +368,14 @@
int lm_count = 0;
int i, rc = 0;
- if (!reqs->topology->num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ if (!reqs->topology.num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&pp, 0, sizeof(pp));
@@ -554,7 +384,7 @@
lm[lm_count] = iter_i.blk;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, lm[lm_count],
+ rm, enc_id, reqs, lm[lm_count],
&pp[lm_count], NULL))
continue;
@@ -563,13 +393,13 @@
/* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, iter_j.blk,
+ rm, enc_id, reqs, iter_j.blk,
&pp[lm_count], iter_i.blk))
continue;
@@ -578,7 +408,7 @@
}
}
- if (lm_count != reqs->topology->num_lm) {
+ if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -587,11 +417,10 @@
if (!lm[i])
break;
- lm[i]->rsvp_nxt = rsvp;
- pp[i]->rsvp_nxt = rsvp;
+ lm[i]->enc_id = enc_id;
+ pp[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
- pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
}
return rc;
@@ -599,88 +428,50 @@
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- const struct dpu_rm_topology_def *top)
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
struct dpu_rm_hw_iter iter;
- int i = 0;
+ int i = 0, num_ctls = 0;
+ bool needs_split_display = false;
memset(&ctls, 0, sizeof(ctls));
+	/* each hw_intf needs its own hw_ctl to program its control path */
+ num_ctls = top->num_intf;
+
+ needs_split_display = _dpu_rm_needs_split_display(top);
+
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
unsigned long features = ctl->caps->features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ if (RESERVED_BY_OTHER(iter.blk, enc_id))
continue;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
- if (top->needs_split_display != has_split_display)
+ if (needs_split_display != has_split_display)
continue;
ctls[i] = iter.blk;
DPU_DEBUG("ctl %d match\n", iter.blk->id);
- if (++i == top->num_ctl)
+ if (++i == num_ctls)
break;
}
- if (i != top->num_ctl)
+ if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
- ctls[i]->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
- rsvp->enc_id);
- }
-
- return 0;
-}
-
-static int _dpu_rm_reserve_cdm(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- uint32_t id,
- enum dpu_hw_blk_type type)
-{
- struct dpu_rm_hw_iter iter;
-
- DRM_DEBUG_KMS("type %d id %d\n", type, id);
-
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
- const struct dpu_cdm_cfg *caps = cdm->caps;
- bool match = false;
-
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
- continue;
-
- if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
- match = test_bit(id, &caps->intf_connect);
-
- DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
- iter.blk->type, iter.blk->id, rsvp->enc_id,
- caps->intf_connect, match);
-
- if (!match)
- continue;
-
- trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
- iter.blk->rsvp_nxt = rsvp;
- break;
- }
-
- if (!iter.hw) {
- DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
- return -ENAVAIL;
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
+ ctls[i]->enc_id = enc_id;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
}
return 0;
@@ -688,10 +479,9 @@
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
uint32_t id,
- enum dpu_hw_blk_type type,
- bool needs_cdm)
+ enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
int ret = 0;
@@ -702,14 +492,13 @@
if (iter.blk->id != id)
continue;
- if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n", type, id);
return -ENAVAIL;
}
- iter.blk->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
+ iter.blk->enc_id = enc_id;
+ trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
break;
}
@@ -719,15 +508,12 @@
return -EINVAL;
}
- if (needs_cdm)
- ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
-
return ret;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
int i, ret = 0;
@@ -737,8 +523,8 @@
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, rsvp, id,
- DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ ret = _dpu_rm_reserve_intf(rm, enc_id, id,
+ DPU_HW_BLK_INTF);
if (ret)
return ret;
}
@@ -746,47 +532,27 @@
return ret;
}
-static int _dpu_rm_make_next_rsvp(
+static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
- struct dpu_rm_topology_def topology;
- /* Create reservation info, tag reserved blocks with it as we go */
- rsvp->seq = ++rm->rsvp_next_seq;
- rsvp->enc_id = enc->base.id;
- rsvp->topology = reqs->topology->top_name;
- list_add_tail(&rsvp->list, &rm->rsvps);
-
- ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- /*
- * Do assignment preferring to give away low-resource CTLs first:
- * - Check mixers without Split Display
- * - Only then allow to grab from CTLs with split display capability
- */
- _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
- if (ret && !reqs->topology->needs_split_display) {
- memcpy(&topology, reqs->topology, sizeof(topology));
- topology.needs_split_display = true;
- _dpu_rm_reserve_ctls(rm, rsvp, &topology);
- }
+ ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
- ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
if (ret)
return ret;
@@ -797,281 +563,82 @@
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
- int i;
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
- memset(reqs, 0, sizeof(*reqs));
+ reqs->topology = req_topology;
- dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
- req_topology)) {
- reqs->topology = &g_top_table[i];
- break;
- }
- }
-
- if (!reqs->topology) {
- DPU_ERROR("invalid topology for the display\n");
- return -EINVAL;
- }
-
- /**
- * Set the requirement based on caps if not set from user space
- * This will ensure to select LM tied with DS blocks
- * Currently, DS blocks are tied with LM 0 and LM 1 (primary display)
- */
- if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
- conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
- reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
-
- DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
- reqs->hw_res.display_num_of_h_tiles);
- DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
- reqs->topology->num_lm, reqs->topology->num_ctl,
- reqs->topology->top_name,
- reqs->topology->needs_split_display);
+ DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_enc,
+ reqs->topology.num_intf);
return 0;
}
-static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
- struct dpu_rm *rm,
- struct drm_encoder *enc)
+static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
- struct dpu_rm_rsvp *i;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return NULL;
- }
-
- if (list_empty(&rm->rsvps))
- return NULL;
-
- list_for_each_entry(i, &rm->rsvps, list)
- if (i->enc_id == enc->base.id)
- return i;
-
- return NULL;
-}
-
-static struct drm_connector *_dpu_rm_get_connector(
- struct drm_encoder *enc)
-{
- struct drm_connector *conn = NULL;
- struct list_head *connector_list =
- &enc->dev->mode_config.connector_list;
-
- list_for_each_entry(conn, connector_list, head)
- if (conn->encoder == enc)
- return conn;
-
- return NULL;
-}
-
-/**
- * _dpu_rm_release_rsvp - release resources and release a reservation
- * @rm: KMS handle
- * @rsvp: RSVP pointer to release and release resources for
- */
-static void _dpu_rm_release_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector *conn)
-{
- struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
- if (!rsvp)
- return;
-
- DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
-
- list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
- if (rsvp == rsvp_c) {
- list_del(&rsvp_c->list);
- break;
- }
- }
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp == rsvp) {
- blk->rsvp = NULL;
- DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
- }
- if (blk->rsvp_nxt == rsvp) {
- blk->rsvp_nxt = NULL;
- DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
+ if (blk->enc_id == enc_id) {
+ blk->enc_id = 0;
+ DPU_DEBUG("rel enc %d %d %d\n", enc_id,
+ type, blk->id);
}
}
}
-
- kfree(rsvp);
}
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
- struct dpu_rm_rsvp *rsvp;
- struct drm_connector *conn;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return;
- }
-
mutex_lock(&rm->rm_lock);
- rsvp = _dpu_rm_get_rsvp(rm, enc);
- if (!rsvp) {
- DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- goto end;
- }
+ _dpu_rm_release_reservation(rm, enc->base.id);
- conn = _dpu_rm_get_connector(enc);
- if (!conn) {
- DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
- goto end;
- }
-
- _dpu_rm_release_rsvp(rm, rsvp, conn);
-end:
mutex_unlock(&rm->rm_lock);
}
-static int _dpu_rm_commit_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector_state *conn_state)
-{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
- int ret = 0;
-
- /* Swap next rsvp to be the active */
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp_nxt) {
- blk->rsvp = blk->rsvp_nxt;
- blk->rsvp_nxt = NULL;
- }
- }
- }
-
- if (!ret)
- DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
- rsvp->topology);
-
- return ret;
-}
-
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only)
{
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_requirements reqs;
int ret;
- if (!rm || !enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
- conn_state->connector->base.id, enc->base.id,
- crtc_state->crtc->base.id, test_only);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
+ enc->base.id, crtc_state->crtc->base.id, test_only);
mutex_lock(&rm->rm_lock);
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
-
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
- conn_state, &reqs, topology);
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
+ topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
}
- /*
- * We only support one active reservation per-hw-block. But to implement
- * transactional semantics for test-only, and for allowing failure while
- * modifying your existing reservation, over the course of this
- * function we can have two reservations:
- * Current: Existing reservation
- * Next: Proposed reservation. The proposed reservation may fail, or may
- * be discarded if in test-only mode.
- * If reservation is successful, and we're not in test-only, then we
- * replace the current with the next.
- */
- rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt) {
- ret = -ENOMEM;
- goto end;
- }
-
- rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
-
- /*
- * User can request that we clear out any reservation during the
- * atomic_check phase by using this CLEAR bit
- */
- if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
- DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
- rsvp_cur->seq, rsvp_cur->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
- rsvp_cur = NULL;
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
- }
-
- /* Check the proposed reservation, store it in hw's "next" field */
- ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
- rsvp_nxt, &reqs);
-
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
-
+ ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
- } else if (test_only && !RM_RQ_LOCK(&reqs)) {
- /*
- * Normally, if test_only, test the reservation and then undo
- * However, if the user requests LOCK, then keep the reservation
- * made during the atomic_check phase.
- */
- DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
- } else {
- if (test_only && RM_RQ_LOCK(&reqs))
- DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
-
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
-
- ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ _dpu_rm_release_reservation(rm, enc->base.id);
+ } else if (test_only) {
+ /* test_only: test the reservation and then undo */
+ DPU_DEBUG("test_only: discard test [enc: %d]\n",
+ enc->base.id);
+ _dpu_rm_release_reservation(rm, enc->base.id);
}
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
-
end:
mutex_unlock(&rm->rm_lock);
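With reservations now keyed purely by encoder id on each hw block, the caller side of the RM becomes a pair of calls with no connector state involved. A minimal usage sketch, not part of the patch: how an encoder's atomic path might drive the reworked API. The dpu_encoder_get_topology() helper and the error handling are assumptions for illustration only.

/*
 * Usage sketch only (assumes a helper that computes the topology):
 * test_only = true tags blocks with enc->base.id, validates, then
 * releases them again; test_only = false keeps the tags until
 * dpu_rm_release(rm, enc) is called.
 */
static int example_encoder_reserve(struct dpu_rm *rm,
				   struct drm_encoder *enc,
				   struct drm_crtc_state *crtc_state)
{
	struct msm_display_topology topology;
	int ret;

	topology = dpu_encoder_get_topology(enc);	/* hypothetical helper */

	ret = dpu_rm_reserve(rm, enc, crtc_state, topology, true);
	if (ret)
		return ret;

	return dpu_rm_reserve(rm, enc, crtc_state, topology, false);
}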
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index ffd1841..9c580a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -1,15 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __DPU_RM_H__
@@ -21,56 +12,15 @@
#include "dpu_hw_top.h"
/**
- * enum dpu_rm_topology_name - HW resource use case in use by connector
- * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
- * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
- */
-enum dpu_rm_topology_name {
- DPU_RM_TOPOLOGY_NONE = 0,
- DPU_RM_TOPOLOGY_SINGLEPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
- DPU_RM_TOPOLOGY_MAX,
-};
-
-/**
- * enum dpu_rm_topology_control - HW resource use case in use by connector
- * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
- * test, reserve the resources for this display.
- * Normal behavior would not impact the reservation
- * list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
- * release any reservation held by this display.
- * Normal behavior would not impact the
- * reservation list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
- */
-enum dpu_rm_topology_control {
- DPU_RM_TOPCTL_RESERVE_LOCK,
- DPU_RM_TOPCTL_RESERVE_CLEAR,
- DPU_RM_TOPCTL_DS,
-};
-
-/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @dev: device handle for event logging purposes
- * @rsvps: list of hardware reservations by each crtc->encoder->connector
* @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block
- * @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
- * @rsvp_next_seq: sequence number for next reservation for debugging purposes
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct drm_device *dev;
- struct list_head rsvps;
struct list_head hw_blks[DPU_HW_BLK_MAX];
- struct dpu_hw_mdp *hw_mdp;
uint32_t lm_max_width;
- uint32_t rsvp_next_seq;
struct mutex rm_lock;
};
@@ -100,13 +50,11 @@
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP
- * @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev);
+ void __iomem *mmio);
/**
* dpu_rm_destroy - Free all memory allocated by dpu_rm_init
@@ -125,7 +73,6 @@
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
- * @conn_state: Proposed Atomic DRM Connector State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
@@ -133,7 +80,6 @@
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only);
@@ -147,14 +93,6 @@
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
/**
- * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
- * This is never reserved, and is usable by any display.
- * @rm: DPU Resource Manager handle
- * @Return: Pointer to hw block or NULL
- */
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
-
-/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw
* @iter: iter object to initialize
@@ -179,21 +117,4 @@
* @Return: true on match found, false on no match found
*/
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
-
-/**
- * dpu_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int dpu_rm_check_property_topctl(uint64_t val);
-
-/**
- * dpu_rm_get_topology_name - returns the name of the the given topology
- * definition
- * @topology: topology definition
- * @Return: name of the topology
- */
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology);
-
#endif /* __DPU_RM_H__ */
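With the DPU_RM_TOPOLOGY_* names gone, a display is now described directly by the counts in struct msm_display_topology. A small illustrative sketch, with values that are assumptions rather than taken from the patch:

/* Illustrative only: a dual layer-mixer, single-interface panel is now
 * expressed as plain counts instead of a named topology. */
struct msm_display_topology topology = {
	.num_lm   = 2,	/* two layer mixers merged for one interface */
	.num_enc  = 0,	/* no extra encoder block in this example */
	.num_intf = 1,	/* one physical interface, e.g. DSI */
};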
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index ae0ca50..eecfe9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -99,27 +91,6 @@
__entry->vbif_idx)
)
-TRACE_EVENT(dpu_perf_update_bus,
- TP_PROTO(int client, unsigned long long ab_quota,
- unsigned long long ib_quota),
- TP_ARGS(client, ab_quota, ib_quota),
- TP_STRUCT__entry(
- __field(int, client)
- __field(u64, ab_quota)
- __field(u64, ib_quota)
- ),
- TP_fast_assign(
- __entry->client = client;
- __entry->ab_quota = ab_quota;
- __entry->ib_quota = ib_quota;
- ),
- TP_printk("Request client:%d ab=%llu ib=%llu",
- __entry->client,
- __entry->ab_quota,
- __entry->ib_quota)
-)
-
-
TRACE_EVENT(dpu_cmd_release_bw,
TP_PROTO(u32 crtc_id),
TP_ARGS(crtc_id),
@@ -167,16 +138,12 @@
)
TRACE_EVENT(dpu_perf_crtc_update,
- TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
- u64 bw_ctl_ebi, u32 core_clk_rate,
- bool stop_req, u32 update_bus, u32 update_clk),
- TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
- stop_req, update_bus, update_clk),
+ TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
+ bool stop_req, bool update_bus, bool update_clk),
+ TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk),
TP_STRUCT__entry(
__field(u32, crtc)
- __field(u64, bw_ctl_mnoc)
- __field(u64, bw_ctl_llcc)
- __field(u64, bw_ctl_ebi)
+ __field(u64, bw_ctl)
__field(u32, core_clk_rate)
__field(bool, stop_req)
__field(u32, update_bus)
@@ -184,20 +151,16 @@
),
TP_fast_assign(
__entry->crtc = crtc;
- __entry->bw_ctl_mnoc = bw_ctl_mnoc;
- __entry->bw_ctl_llcc = bw_ctl_llcc;
- __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->bw_ctl = bw_ctl;
__entry->core_clk_rate = core_clk_rate;
__entry->stop_req = stop_req;
__entry->update_bus = update_bus;
__entry->update_clk = update_clk;
),
TP_printk(
- "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ "crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
__entry->crtc,
- __entry->bw_ctl_mnoc,
- __entry->bw_ctl_llcc,
- __entry->bw_ctl_ebi,
+ __entry->bw_ctl,
__entry->core_clk_rate,
__entry->stop_req,
__entry->update_bus,
@@ -319,6 +282,10 @@
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -425,7 +392,7 @@
__entry->rc_state = rc_state;
__assign_str(stage_str, stage);
),
- TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
__get_str(stage_str), __entry->drm_id, __entry->sw_event,
__entry->idle_pc_supported ? "true" : "false",
__entry->rc_state)
@@ -468,14 +435,16 @@
TRACE_EVENT(dpu_enc_trigger_flush,
TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
- int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
+ u32 pending_flush_ret),
TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
- pending_flush_ret),
+ extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
+ __field( u32, extra_flush_bits )
__field( u32, pending_flush_ret )
),
TP_fast_assign(
@@ -483,12 +452,14 @@
__entry->intf_idx = intf_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
+ __entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
- "pending_flush_ret=%u", __entry->drm_id,
- __entry->intf_idx, __entry->pending_kickoff_cnt,
- __entry->ctl_idx, __entry->pending_flush_ret)
+ "extra_flush_bits=0x%x pending_flush_ret=0x%x",
+ __entry->drm_id, __entry->intf_idx,
+ __entry->pending_kickoff_cnt, __entry->ctl_idx,
+ __entry->extra_flush_bits, __entry->pending_flush_ret)
);
DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
@@ -535,10 +506,6 @@
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
- TP_PROTO(uint32_t drm_id, u32 event),
- TP_ARGS(drm_id, event)
-);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
@@ -682,37 +649,41 @@
TP_STRUCT__entry(
__field( uint32_t, crtc_id )
__field( uint32_t, plane_id )
- __field( struct drm_plane_state*,state )
- __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, fb_id )
+ __field_struct( struct drm_rect, src_rect )
+ __field_struct( struct drm_rect, dst_rect )
__field( uint32_t, stage_idx )
+ __field( enum dpu_stage, stage )
__field( enum dpu_sspp, sspp )
+ __field( uint32_t, multirect_idx )
+ __field( uint32_t, multirect_mode )
__field( uint32_t, pixel_format )
__field( uint64_t, modifier )
),
TP_fast_assign(
__entry->crtc_id = crtc_id;
__entry->plane_id = plane_id;
- __entry->state = state;
- __entry->pstate = pstate;
+ __entry->fb_id = state ? state->fb->base.id : 0;
+ __entry->src_rect = drm_plane_state_src(state);
+ __entry->dst_rect = drm_plane_state_dest(state);
__entry->stage_idx = stage_idx;
+ __entry->stage = pstate->stage;
__entry->sspp = sspp;
+ __entry->multirect_idx = pstate->multirect_index;
+ __entry->multirect_mode = pstate->multirect_mode;
__entry->pixel_format = pixel_format;
__entry->modifier = modifier;
),
- TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
- "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
+ " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
"multirect_index:%d multirect_mode:%u pix_format:%u "
"modifier:%llu",
- __entry->crtc_id, __entry->plane_id,
- __entry->state->fb ? __entry->state->fb->base.id : -1,
- __entry->state->src_w >> 16, __entry->state->src_h >> 16,
- __entry->state->src_x >> 16, __entry->state->src_y >> 16,
- __entry->state->crtc_w, __entry->state->crtc_h,
- __entry->state->crtc_x, __entry->state->crtc_y,
- __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
- __entry->pstate->multirect_index,
- __entry->pstate->multirect_mode, __entry->pixel_format,
- __entry->modifier)
+ __entry->crtc_id, __entry->plane_id, __entry->fb_id,
+ DRM_RECT_FP_ARG(&__entry->src_rect),
+ DRM_RECT_ARG(&__entry->dst_rect),
+ __entry->stage_idx, __entry->stage, __entry->sspp,
+ __entry->multirect_idx, __entry->multirect_mode,
+ __entry->pixel_format, __entry->modifier)
);
TRACE_EVENT(dpu_crtc_setup_lm_bounds,
@@ -721,15 +692,15 @@
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( int, mixer )
- __field( struct drm_rect *, bounds )
+ __field_struct( struct drm_rect, bounds )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->mixer = mixer;
- __entry->bounds = bounds;
+ __entry->bounds = *bounds;
),
TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
- __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+ __entry->mixer, DRM_RECT_ARG(&__entry->bounds))
);
TRACE_EVENT(dpu_crtc_vblank_enable,
@@ -740,21 +711,18 @@
__field( uint32_t, drm_id )
__field( uint32_t, enc_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
),
- TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
- "vblank_req:%s}",
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -763,22 +731,16 @@
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
),
- TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ TP_printk("id:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
- TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
- TP_ARGS(drm_id, enable, crtc)
+ __entry->enabled ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -814,24 +776,24 @@
TP_ARGS(index, layout, multirect_index),
TP_STRUCT__entry(
__field( enum dpu_sspp, index )
- __field( struct dpu_hw_fmt_layout*, layout )
+ __field_struct( struct dpu_hw_fmt_layout, layout )
__field( enum dpu_sspp_multirect_index, multirect_index)
),
TP_fast_assign(
__entry->index = index;
- __entry->layout = layout;
+ __entry->layout = *layout;
__entry->multirect_index = multirect_index;
),
TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
- "multirect_index:%d", __entry->index, __entry->layout->width,
- __entry->layout->height, __entry->layout->plane_addr[0],
- __entry->layout->plane_size[0],
- __entry->layout->plane_addr[1],
- __entry->layout->plane_size[1],
- __entry->layout->plane_addr[2],
- __entry->layout->plane_size[2],
- __entry->layout->plane_addr[3],
- __entry->layout->plane_size[3], __entry->multirect_index)
+ "multirect_index:%d", __entry->index, __entry->layout.width,
+ __entry->layout.height, __entry->layout.plane_addr[0],
+ __entry->layout.plane_size[0],
+ __entry->layout.plane_addr[1],
+ __entry->layout.plane_size[1],
+ __entry->layout.plane_addr[2],
+ __entry->layout.plane_size[2],
+ __entry->layout.plane_addr[3],
+ __entry->layout.plane_size[3], __entry->multirect_index)
);
TRACE_EVENT(dpu_plane_disable,
@@ -853,52 +815,42 @@
);
DECLARE_EVENT_CLASS(dpu_rm_iter_template,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
),
- TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
- __entry->enc_id)
-);
-DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
TRACE_EVENT(dpu_rm_reserve_lms,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
- uint32_t pp_id),
- TP_ARGS(id, type, enc_id, pp_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+ TP_ARGS(id, enc_id, pp_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
__field( uint32_t, pp_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
__entry->pp_id = pp_id;
),
- TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
- __entry->type, __entry->enc_id, __entry->pp_id)
+ TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->enc_id, __entry->pp_id)
);
TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
@@ -979,19 +931,66 @@
TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
TP_ARGS(dev, stop_req, clk_rate),
TP_STRUCT__entry(
- __field( struct drm_device *, dev )
+ __string( dev_name, dev->unique )
__field( bool, stop_req )
__field( u64, clk_rate )
),
TP_fast_assign(
- __entry->dev = dev;
+ __assign_str(dev_name, dev->unique);
__entry->stop_req = stop_req;
__entry->clk_rate = clk_rate;
),
- TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
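The trace changes above all follow one pattern: stop storing raw pointers (struct drm_plane_state *, struct dpu_crtc *, struct drm_device *) in the trace entry and instead copy the needed values at TP_fast_assign time with __field_struct()/__string(), because TP_printk runs only when the ring buffer is read, by which point the pointed-to state may be gone. A minimal sketch of the pattern with a hypothetical event name:

/* Hypothetical event, shown only to illustrate the by-value capture
 * pattern: copy what TP_printk needs up front instead of stashing a
 * pointer that may be stale when the buffer is formatted. */
TRACE_EVENT(example_plane_rect,
	TP_PROTO(struct drm_plane_state *state),
	TP_ARGS(state),
	TP_STRUCT__entry(
		__field_struct(	struct drm_rect,	dst	)
		__field(	uint32_t,		fb_id	)
	),
	TP_fast_assign(
		__entry->dst = drm_plane_state_dest(state);
		__entry->fb_id = state->fb ? state->fb->base.id : 0;
	),
	TP_printk("fb_id:%u dst:" DRM_RECT_FMT,
		  __entry->fb_id, DRM_RECT_ARG(&__entry->dst))
);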
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 2955282..8d24b79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include "dpu_vbif.h"
#include "dpu_hw_vbif.h"
@@ -191,7 +184,7 @@
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
- goto exit;
+ return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
@@ -210,8 +203,6 @@
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
- return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -274,11 +265,6 @@
struct dpu_hw_vbif *vbif;
u32 i, pnd, src;
- if (!dpu_kms) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->ops.clear_errors) {
@@ -296,11 +282,6 @@
struct dpu_hw_vbif *vbif;
int i, j;
- if (!dpu_kms) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
vbif = dpu_kms->hw_vbif[i];
if (vbif && vbif->cap && vbif->ops.set_mem_type) {
@@ -312,31 +293,21 @@
}
#ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_vbif);
- dpu_kms->debugfs_vbif = NULL;
-}
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
- struct dentry *debugfs_vbif;
+ struct dentry *entry, *debugfs_vbif;
int i, j;
- dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
- if (!dpu_kms->debugfs_vbif) {
- DPU_ERROR("failed to create vbif debugfs\n");
- return -EINVAL;
- }
+ entry = debugfs_create_dir("vbif", debugfs_root);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
- debugfs_vbif = debugfs_create_dir(vbif_name,
- dpu_kms->debugfs_vbif);
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
@@ -378,7 +349,5 @@
(u32 *)&cfg->ot_limit);
}
}
-
- return 0;
}
#endif
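The vbif debugfs init now returns void and stops checking debugfs_create_dir(), following the kernel-wide convention that debugfs problems are never treated as fatal: the create helpers tolerate an error return from a previous call, so nothing needs to propagate back to driver init. A short sketch of that convention, with illustrative names:

/* Sketch of the unchecked debugfs style used above (names are
 * illustrative): return values are simply not inspected. */
static void example_debugfs_init(struct dentry *root, u32 *features)
{
	struct dentry *dir = debugfs_create_dir("example", root);

	debugfs_create_u32("features", 0600, dir, features);
}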
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index f17af52..ab49017 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DPU_VBIF_H__
@@ -78,17 +70,6 @@
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
- struct dentry *debugfs_root)
-{
- return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
index 4f12e5c..9fc9dbd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -813,18 +813,6 @@
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
- (void)height;
- (void)width;
-
- /*
- * In the future, calculate the size based on the w/h but just
- * hardcode it for now since 16K satisfies all current usecases.
- */
- return 16 * 1024;
-}
-
/*
* Function arguments:
* @color_fmt
@@ -832,114 +820,99 @@
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_NV12_MVTB:
- case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
- break;
- case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
- break;
- case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
- break;
- }
-invalid_input:
- return stride;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- */
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
-{
- unsigned int alignment, stride = 0;
-
- if (!width)
- goto invalid_input;
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_NV12_MVTB:
- case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
- break;
- case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
- break;
- case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
- break;
- }
-invalid_input:
- return stride;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @height
- * Progressive: height
- * Interlaced: (height+1)>>1
- */
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
-{
- unsigned int alignment, sclines = 0;
-
- if (!height)
- goto invalid_input;
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_NV12_MVTB:
- case COLOR_FMT_NV12_UBWC:
- case COLOR_FMT_P010:
- alignment = 32;
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- case COLOR_FMT_P010_UBWC:
- alignment = 16;
- break;
- default:
return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
}
- sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int stride = 0;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
+ }
+
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int sclines = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ sclines = MSM_MEDIA_ALIGN(height, 32);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ sclines = MSM_MEDIA_ALIGN(height, 16);
+ break;
+ }
+
return sclines;
}
@@ -950,12 +923,12 @@
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
case COLOR_FMT_P010:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
break;
case COLOR_FMT_NV12_UBWC:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
break;
- default:
- goto invalid_input;
}
- sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
return sclines;
}
@@ -986,12 +954,12 @@
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
- int y_tile_width = 0, y_meta_stride = 0;
+ int y_tile_width = 0, y_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@
y_tile_width = 48;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
- y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
- return y_meta_stride;
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}
/*
@@ -1019,12 +984,12 @@
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
- int y_tile_height = 0, y_meta_scanlines = 0;
+ int y_tile_height = 0, y_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@
y_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
- y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
- return y_meta_scanlines;
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}
/*
@@ -1052,12 +1014,12 @@
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
- int uv_tile_width = 0, uv_meta_stride = 0;
+ int uv_tile_width = 0, uv_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@
uv_tile_width = 24;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
- uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
- return uv_meta_stride;
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}
/*
@@ -1085,12 +1044,12 @@
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
- int uv_tile_height = 0, uv_meta_scanlines = 0;
+ int uv_tile_height = 0, uv_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@
uv_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
- uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
- return uv_meta_scanlines;
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
- unsigned int alignment = 0, stride = 0, bpp = 4;
+ unsigned int alignment = 0, bpp = 4;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@
alignment = 256;
break;
default:
- goto invalid_input;
+ return 0;
}
- stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
- return stride;
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
}
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment = 0, scanlines = 0;
+ unsigned int alignment = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@
alignment = 16;
break;
default:
- goto invalid_input;
+ return 0;
}
- scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
- return scanlines;
+ return MSM_MEDIA_ALIGN(height, alignment);
}
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
- int rgb_tile_width = 0, rgb_meta_stride = 0;
+ int rgb_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_width = 16;
- break;
- default:
- goto invalid_input;
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
}
- rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
- rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
- return rgb_meta_stride;
+ return 0;
}
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
- int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+ int rgb_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_height = 4;
- break;
- default:
- goto invalid_input;
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
}
- rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
- rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
- return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
- int color_fmt, int width, int height)
-{
- const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
- unsigned int uv_alignment = 0, size = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
- unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
- unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
- unsigned int y_meta_plane = 0, uv_meta_plane = 0;
- unsigned int rgb_stride = 0, rgb_scanlines = 0;
- unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
- unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_P010:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane +
- MSM_MEDIA_MAX(extra_size, 8 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_MVTB:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane;
- size = 2 * size + extra_size;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_UBWC:
- y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines =
- VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines =
- VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane)*2 +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_P010_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888:
- rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
- size = rgb_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888_UBWC:
- case COLOR_FMT_RGBA1010102_UBWC:
- case COLOR_FMT_RGB565_UBWC:
- rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
- 4096);
- rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
- height);
- rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
- rgb_meta_scanlines, 4096);
- size = rgb_ubwc_plane + rgb_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- default:
- break;
- }
-invalid_input:
- return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
- int color_fmt, int width, int height)
-{
- unsigned int offset = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- switch (color_fmt) {
- case COLOR_FMT_NV12_MVTB:
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines;
- offset = y_plane + uv_plane;
- break;
- default:
- break;
- }
-invalid_input:
- return offset;
+ return 0;
}
#endif
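After the cleanup, each VENUS_* helper reduces to a single round-up per format. A worked example for plain NV12 at 1920x1080, assuming MSM_MEDIA_ALIGN(x, a) rounds x up to the next multiple of a:

/*
 *   VENUS_Y_STRIDE(NV12, 1920)     = ALIGN(1920, 128)           = 1920
 *   VENUS_Y_SCANLINES(NV12, 1080)  = ALIGN(1080, 32)            = 1088
 *   VENUS_UV_STRIDE(NV12, 1920)    = ALIGN(1920, 128)           = 1920
 *   VENUS_UV_SCANLINES(NV12, 1080) = ALIGN((1080 + 1) >> 1, 16) = 544
 */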
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 457c29d..f34dca5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -1,24 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mdp4_kms.h"
@@ -128,7 +118,7 @@
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -244,14 +234,8 @@
mode = &crtc->state->adjusted_mode;
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mdp4_crtc->name, mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("%s: set mode: " DRM_MODE_FMT,
+ mdp4_crtc->name, DRM_MODE_ARG(mode));
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
@@ -384,7 +368,7 @@
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +413,7 @@
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -442,7 +426,7 @@
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 6a1ebda..772f075 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -1,24 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2014, Inforce Computing. All rights reserved.
*
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
@@ -58,14 +47,7 @@
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587..5d89560 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -1,22 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
@@ -45,7 +34,7 @@
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
- dev_err(dev->dev, "could not find dtv pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
@@ -104,14 +93,7 @@
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_dtv_encoder->pixclock = mode->clock * 1000;
@@ -209,16 +191,16 @@
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
- dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
@@ -258,14 +240,14 @@
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
- dev_err(dev->dev, "failed to get hdmi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get tv_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
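The mdp4 crtc and encoder files above apply two mechanical logging conversions: verbose hand-rolled mode dumps become DRM_MODE_FMT/DRM_MODE_ARG, and dev_err() becomes DRM_DEV_ERROR(). A small sketch of both, with an illustrative function name and message:

/* Sketch only; the surrounding function and message are assumptions. */
static void example_log(struct drm_device *dev, struct drm_display_mode *mode)
{
	/* one format helper instead of listing every timing field by hand */
	DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));

	/* DRM-prefixed device error instead of plain dev_err() */
	DRM_DEV_ERROR(dev->dev, "example failure\n");
}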
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index b764d7f..4d49f3b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
@@ -1,21 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp4_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 44d1cda..50711cc 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -1,20 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
+
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_gem.h"
@@ -43,7 +35,7 @@
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -104,40 +96,51 @@
return ret;
}
-static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp4_enable_commit(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+}
+
+static void mdp4_disable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_disable(mdp4_kms);
+}
+
+static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- mdp4_enable(mdp4_kms);
-
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
}
-static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
-
- drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
-
- /* see 119ecb7fd */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- drm_crtc_vblank_put(crtc);
-
- mdp4_disable(mdp4_kms);
+ /* TODO */
}
-static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms,
- struct drm_crtc *crtc)
+static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
- mdp4_crtc_wait_for_commit_done(crtc);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ mdp4_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ /* see 119ecb7fd */
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ drm_crtc_vblank_put(crtc);
}
static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -165,7 +168,7 @@
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@@ -189,9 +192,12 @@
.irq = mdp4_irq,
.enable_vblank = mdp4_enable_vblank,
.disable_vblank = mdp4_disable_vblank,
+ .enable_commit = mdp4_enable_commit,
+ .disable_commit = mdp4_disable_commit,
.prepare_commit = mdp4_prepare_commit,
+ .flush_commit = mdp4_flush_commit,
+ .wait_flush = mdp4_wait_flush,
.complete_commit = mdp4_complete_commit,
- .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
.destroy = mdp4_destroy,
@@ -206,7 +212,8 @@
clk_disable_unprepare(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
- clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_disable_unprepare(mdp4_kms->axi_clk);
@@ -220,7 +227,8 @@
clk_prepare_enable(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
- clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_prepare_enable(mdp4_kms->axi_clk);
@@ -251,7 +259,7 @@
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
@@ -260,7 +268,7 @@
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
- dev_err(dev->dev, "failed to initialize LVDS connector\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
@@ -271,7 +279,7 @@
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct DTV encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
@@ -282,7 +290,7 @@
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
@@ -300,7 +308,7 @@
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
@@ -311,14 +319,14 @@
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize DSI: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
- dev_err(dev->dev, "Invalid or unsupported interface\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
@@ -354,7 +362,7 @@
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -365,7 +373,7 @@
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -374,7 +382,7 @@
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for %s\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
@@ -396,7 +404,7 @@
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
- dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
@@ -419,7 +427,7 @@
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
@@ -439,7 +447,7 @@
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -456,14 +464,14 @@
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
- dev_err(dev->dev, "failed to get core_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
@@ -472,23 +480,25 @@
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- // XXX if (rev >= MDP_REV_42) { ???
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- dev_err(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
- dev_err(dev->dev, "failed to get axi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ if (mdp4_kms->lut_clk)
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
@@ -519,29 +529,29 @@
if (ret)
goto fail;
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
aspace = NULL;
}
ret = modeset_init(mdp4_kms);
if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
- dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
- dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index 0c13f86..18933bd 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP4_KMS_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 2bfb390..871f351 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -1,23 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
+
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mdp4_kms.h"
@@ -47,7 +38,7 @@
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
- dev_err(dev->dev, "could not find lvds pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
@@ -224,7 +215,7 @@
break;
default:
- dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
@@ -241,7 +232,7 @@
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
- dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
@@ -273,14 +264,7 @@
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
@@ -361,7 +345,7 @@
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
@@ -377,20 +361,25 @@
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
+ uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
- mdp4_crtc_set_config(encoder->crtc,
- MDP4_DMA_CONFIG_R_BPC(BPC6) |
- MDP4_DMA_CONFIG_G_BPC(BPC6) |
- MDP4_DMA_CONFIG_B_BPC(BPC6) |
- MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
- MDP4_DMA_CONFIG_PACK(0x21) |
- MDP4_DMA_CONFIG_DEFLKR_EN |
- MDP4_DMA_CONFIG_DITHER_EN);
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
@@ -398,16 +387,16 @@
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
- dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
@@ -461,7 +450,7 @@
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
- dev_err(dev->dev, "failed to get lvds_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
@@ -470,7 +459,7 @@
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +467,7 @@
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +475,7 @@
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index 5368e62..9262ed2 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
* Author: Vinay Simha <vinaysimha@inforcecomputing.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/gpio.h>
-
#include "mdp4_kms.h"
struct mdp4_lvds_connector {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ce42459..ab8c0c1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 79ff653..da3cc1d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -1,20 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "mdp4_kms.h"
#define DOWN_SCALE_MAX 8
@@ -68,7 +60,6 @@
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -235,22 +226,22 @@
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}
@@ -392,6 +383,8 @@
mdp4_plane_install_properties(plane, &plane->base);
+ drm_plane_enable_fb_damage_clips(plane);
+
return plane;
fail:
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d..f6e71ff 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "mdp5_kms.h"
@@ -553,13 +545,211 @@
.max_clk = 412500000,
};
-static const struct mdp5_cfg_handler cfg_handlers[] = {
+const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
+const struct mdp5_cfg_hw msm8998_config = {
+ .name = "msm8998",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf7ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000, 0x2a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
+ { .revision = 0, .config = { .hw = &msm8998_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -590,8 +780,9 @@
struct drm_device *dev = mdp5_kms->dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp5_cfg_handler *cfg_handler;
+ const struct mdp5_cfg_handler *cfg_handlers;
struct mdp5_cfg_platform *pconfig;
- int i, ret = 0;
+ int i, ret = 0, num_handlers;
cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
@@ -599,15 +790,24 @@
goto fail;
}
- if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ switch (major) {
+ case 1:
+ cfg_handlers = cfg_handlers_v1;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v1);
+ break;
+ case 3:
+ cfg_handlers = cfg_handlers_v3;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v3);
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
- }
+ };
/* only after mdp5_cfg global pointer's init can we access the hw */
- for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+ for (i = 0; i < num_handlers; i++) {
if (cfg_handlers[i].revision != minor)
continue;
mdp5_cfg = cfg_handlers[i].config.hw;
@@ -615,7 +815,7 @@
break;
}
if (unlikely(!mdp5_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
@@ -635,7 +835,7 @@
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 75910d0..1c50d01 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MDP5_CFG_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index d6f79dc..eeef41f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mdp5_kms.h"
@@ -55,20 +47,20 @@
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
- dev_err(dev, "vsync_clk is not initialized\n");
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
- total_lines_x100 = mode->vtotal * mode->vrefresh;
+ total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
- dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
- __func__, mode->vtotal, mode->vrefresh);
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
- dev_err(dev, "vsync_clk round rate failed %ld\n",
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
@@ -102,13 +94,13 @@
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}
@@ -134,14 +126,7 @@
{
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
pingpong_tearcheck_setup(encoder, mode);
mdp5_crtc_set_pipeline(encoder->crtc);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b1da9ce..eb0b4b7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1,26 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/sort.h>
+
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "mdp5_kms.h"
@@ -173,7 +165,7 @@
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -384,14 +376,7 @@
mode = &crtc->state->adjusted_mode;
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- crtc->name, mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
mixer_width = mode->hdisplay;
if (r_mixer)
@@ -457,6 +442,18 @@
mdp5_crtc->enabled = false;
}
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -493,7 +490,7 @@
}
/* Restore vblank irq handling after power is enabled */
- drm_crtc_vblank_on(crtc);
+ mdp5_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
@@ -662,7 +659,7 @@
ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
if (ret) {
- dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
@@ -679,7 +676,7 @@
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
@@ -789,6 +786,7 @@
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -807,7 +805,7 @@
width = mdp5_crtc->cursor.width;
height = mdp5_crtc->cursor.height;
- stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+ stride = width * info->cpp[0];
get_roi(crtc, &roi_w, &roi_h);
@@ -879,7 +877,7 @@
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -903,7 +901,7 @@
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
@@ -924,7 +922,7 @@
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
- dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}
@@ -1009,23 +1007,6 @@
drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
-static void mdp5_crtc_reset(struct drm_crtc *crtc)
-{
- struct mdp5_crtc_state *mdp5_cstate;
-
- if (crtc->state) {
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
- kfree(to_mdp5_crtc_state(crtc->state));
- }
-
- mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
-
- if (mdp5_cstate) {
- mdp5_cstate->base.crtc = crtc;
- crtc->state = &mdp5_cstate->base;
- }
-}
-
static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
@@ -1053,6 +1034,19 @@
kfree(mdp5_cstate);
}
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ mdp5_crtc_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+ drm_crtc_vblank_reset(crtc);
+}
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index f93d568..030279d 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "mdp5_kms.h"
@@ -261,14 +253,14 @@
u32 blend_cfg;
struct mdp5_hw_mixer *mixer = pipeline->mixer;
- if (unlikely(WARN_ON(!mixer))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ if (WARN_ON(!mixer)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
- dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
@@ -604,10 +596,10 @@
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
- dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
- dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
@@ -652,7 +644,7 @@
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
@@ -698,13 +690,13 @@
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
- dev_err(dev->dev, "failed to allocate CTL manager\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
- if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
- dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
@@ -723,7 +715,7 @@
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
- dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index 403b0db..c2af68a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MDP5_CTL_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index fcd44d1..f488272 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -1,23 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mdp5_kms.h"
@@ -118,14 +107,7 @@
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 280e368..9b4c8d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irq.h>
#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "mdp5_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index bddd625..91cd76a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -1,23 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/of_irq.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_vblank.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -144,46 +140,58 @@
state->mdp5_kms = mdp5_kms;
- drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+ drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
&state->base,
&mdp5_global_state_funcs);
return 0;
}
+static void mdp5_enable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_disable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+}
+
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
- pm_runtime_get_sync(dev);
-
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
-static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct device *dev = &mdp5_kms->pdev->dev;
- struct mdp5_global_state *global_state;
+ struct drm_crtc *crtc;
- drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+ for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
+ mdp5_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
-
- pm_runtime_put_sync(dev);
-}
-
-static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
- struct drm_crtc *crtc)
-{
- mdp5_crtc_wait_for_commit_done(crtc);
}
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -264,7 +272,7 @@
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
return ret;
}
@@ -281,9 +289,12 @@
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
+ .flush_commit = mdp5_flush_commit,
+ .enable_commit = mdp5_enable_commit,
+ .disable_commit = mdp5_disable_commit,
.prepare_commit = mdp5_prepare_commit,
+ .wait_flush = mdp5_wait_flush,
.complete_commit = mdp5_complete_commit,
- .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
@@ -337,7 +348,7 @@
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
@@ -418,7 +429,7 @@
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
- dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
@@ -443,7 +454,7 @@
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@@ -500,7 +511,7 @@
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
@@ -517,7 +528,7 @@
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +563,7 @@
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +572,7 @@
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
- dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
@@ -673,6 +684,7 @@
struct msm_kms *kms;
struct msm_gem_address_space *aspace;
int irq, i, ret;
+ struct device *iommu_dev;
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
@@ -688,7 +700,7 @@
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq < 0) {
ret = irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -712,7 +724,11 @@
mdelay(16);
if (config->platform.iommu) {
- aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu_dev = &pdev->dev;
+ if (!iommu_dev->iommu_fwspec)
+ iommu_dev = iommu_dev->parent;
+
+ aspace = msm_gem_address_space_create(iommu_dev,
config->platform.iommu, "mdp5");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
@@ -724,12 +740,12 @@
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
goto fail;
}
} else {
- dev_info(&pdev->dev,
+ DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
}
@@ -738,7 +754,7 @@
ret = modeset_init(mdp5_kms);
if (ret) {
- dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
@@ -750,7 +766,7 @@
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
return kms;
@@ -795,7 +811,7 @@
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
- dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
@@ -867,7 +883,7 @@
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
- dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
@@ -897,7 +913,7 @@
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
- dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
@@ -1048,9 +1064,46 @@
.unbind = mdp5_unbind,
};
+static int mdp5_setup_interconnect(struct platform_device *pdev)
+{
+ struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");
+
+ if (IS_ERR(path0))
+ return PTR_ERR(path0);
+
+ if (!path0) {
+ /* no interconnect support is not necessarily a fatal
+ * condition, the platform may simply not have an
+ * interconnect driver yet. But warn about it in case
+ * bootloader didn't setup bus clocks high enough for
+ * scanout.
+ */
+ dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
+ return 0;
+ }
+
+ icc_set_bw(path0, 0, MBps_to_icc(6400));
+
+ if (!IS_ERR_OR_NULL(path1))
+ icc_set_bw(path1, 0, MBps_to_icc(6400));
+ if (!IS_ERR_OR_NULL(path_rot))
+ icc_set_bw(path_rot, 0, MBps_to_icc(6400));
+
+ return 0;
+}
+
static int mdp5_dev_probe(struct platform_device *pdev)
{
+ int ret;
+
DBG("");
+
+ ret = mdp5_setup_interconnect(pdev);
+ if (ret)
+ return ret;
+
return component_add(&pdev->dev, &mdp5_ops);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 854dfd3..d1bf4fd 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_KMS_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 1cc4e57..09bd46a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqdomain.h>
@@ -132,7 +121,7 @@
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
mdp5_mdss);
if (!d) {
- dev_err(dev, "mdss irq domain add failed\n");
+ DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
@@ -246,7 +235,7 @@
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
@@ -259,7 +248,7 @@
ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
goto fail;
}
@@ -267,13 +256,13 @@
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 113e6b5..954db68 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 9be94f5..43c9ba4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_LM_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index 1ef26bc..ba66959 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mdp5_kms.h"
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index bb2b0ac..9b26d07 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_PIPE_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 273cbbe..8342309 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+
#include "mdp5_kms.h"
struct mdp5_plane {
@@ -46,7 +38,6 @@
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -126,7 +117,7 @@
SET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -154,7 +145,7 @@
GET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -185,7 +176,7 @@
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
@@ -228,7 +219,7 @@
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
if (state->fb)
- drm_framebuffer_unreference(state->fb);
+ drm_framebuffer_put(state->fb);
kfree(pstate);
}
@@ -503,6 +494,8 @@
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
@@ -525,6 +518,8 @@
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
@@ -651,23 +646,21 @@
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasex_steps[COMP_MAX])
{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasex_step;
- unsigned int hsub;
int ret;
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
- dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
-
phasex_steps[COMP_0] = phasex_step;
phasex_steps[COMP_3] = phasex_step;
- phasex_steps[COMP_1_2] = phasex_step / hsub;
+ phasex_steps[COMP_1_2] = phasex_step / info->hsub;
return 0;
}
@@ -676,23 +669,21 @@
uint32_t pixel_format, uint32_t src, uint32_t dest,
uint32_t phasey_steps[COMP_MAX])
{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct device *dev = mdp5_kms->dev->dev;
uint32_t phasey_step;
- unsigned int vsub;
int ret;
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
- dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
- vsub = drm_format_vert_chroma_subsampling(pixel_format);
-
phasey_steps[COMP_0] = phasey_step;
phasey_steps[COMP_3] = phasey_step;
- phasey_steps[COMP_1_2] = phasey_step / vsub;
+ phasey_steps[COMP_1_2] = phasey_step / info->vsub;
return 0;
}
@@ -700,8 +691,9 @@
static uint32_t get_scale_config(const struct mdp_format *format,
uint32_t src, uint32_t dst, bool horz)
{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
bool scaling = format->is_yuv ? true : (src != dst);
- uint32_t sub, pix_fmt = format->base.pixel_format;
+ uint32_t sub;
uint32_t ya_filter, uv_filter;
bool yuv = format->is_yuv;
@@ -709,8 +701,7 @@
return 0;
if (yuv) {
- sub = horz ? drm_format_horz_chroma_subsampling(pix_fmt) :
- drm_format_vert_chroma_subsampling(pix_fmt);
+ sub = horz ? info->hsub : info->vsub;
uv_filter = ((src / sub) <= dst) ?
SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
}
@@ -755,7 +746,7 @@
uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
- uint32_t pix_fmt = format->base.pixel_format;
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
uint32_t lr, tb, req;
int i;
@@ -764,8 +755,8 @@
uint32_t roi_h = src_h;
if (format->is_yuv && i == COMP_1_2) {
- roi_w /= drm_format_horz_chroma_subsampling(pix_fmt);
- roi_h /= drm_format_vert_chroma_subsampling(pix_fmt);
+ roi_w /= info->hsub;
+ roi_h /= info->vsub;
}
lr = (pe_left[i] >= 0) ?
@@ -1100,6 +1091,8 @@
mdp5_plane_install_properties(plane, &plane->base);
+ drm_plane_enable_fb_damage_clips(plane);
+
return plane;
fail:
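
Note on the format-helper conversion in the mdp5_plane.c hunks above: the removed drm_format_horz/vert_chroma_subsampling(), drm_format_num_planes() and drm_format_plane_cpp() helpers are replaced by fields of the drm_format_info descriptor. A minimal sketch of the replacement pattern, for illustration only (the function name example_format_geometry() is made up, not part of this patch):

#include <linux/printk.h>
#include <drm/drm_fourcc.h>

static void example_format_geometry(u32 pixel_format)
{
	/* hsub/vsub/num_planes/cpp[] carry the values the removed
	 * drm_format_*() helpers used to compute per call.
	 */
	const struct drm_format_info *info = drm_format_info(pixel_format);
	unsigned int i;

	if (!info)
		return;	/* unknown fourcc */

	pr_info("hsub=%u vsub=%u planes=%u\n",
		info->hsub, info->vsub, info->num_planes);

	for (i = 0; i < info->num_planes; i++)
		pr_info("plane %u: cpp=%u\n", i, info->cpp[i]);
}
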
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 96c2b82..b31cfb5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -1,21 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_fourcc.h>
+#include <drm/drm_util.h>
#include "mdp5_kms.h"
#include "mdp5_smp.h"
@@ -88,7 +79,7 @@
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
@@ -126,14 +117,15 @@
const struct mdp_format *format,
u32 width, bool hdecim)
{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
struct mdp5_kms *mdp5_kms = get_kms(smp);
int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
int i, hsub, nplanes, nlines;
u32 fmt = format->base.pixel_format;
uint32_t blkcfg = 0;
- nplanes = drm_format_num_planes(fmt);
- hsub = drm_format_horz_chroma_subsampling(fmt);
+ nplanes = info->num_planes;
+ hsub = info->hsub;
/* different if BWC (compressed framebuffer?) enabled: */
nlines = 2;
@@ -156,7 +148,7 @@
for (i = 0; i < nplanes; i++) {
int n, fetch_stride, cpp;
- cpp = drm_format_plane_cpp(fmt, i);
+ cpp = info->cpp[i];
fetch_stride = width * cpp / (i ? hsub : 1);
n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
@@ -188,7 +180,7 @@
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
- dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
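
The dev_err()/dev_dbg() calls in these display and DSI files are switched to the DRM device logging macros throughout the patch. A minimal sketch of the pattern, with a hypothetical helper name; both macros take the same struct device pointer the old calls used:

#include <linux/device.h>
#include <drm/drm_print.h>

static int example_check(struct device *dev, int err)
{
	if (err) {
		/* same arguments as dev_err(), logged via the DRM printer */
		DRM_DEV_ERROR(dev, "operation failed: %d\n", err);
		return err;
	}

	DRM_DEV_DEBUG(dev, "operation ok\n");
	return 0;
}
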
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index b41d044..ba5618e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP5_SMP_H__
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index 005760b..5495d8b 100644
--- a/drivers/gpu/drm/msm/disp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
@@ -185,7 +174,7 @@
struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
{
- if (unlikely(WARN_ON(type >= CSC_MAX)))
+ if (WARN_ON(type >= CSC_MAX))
return NULL;
return &csc_convert[type];
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
index 6428730..3c35ccf 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 4fa8dbe..1535c56 100644
--- a/drivers/gpu/drm/msm/disp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MDP_KMS_H__
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ff8164c..55ea4bc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dsi.h"
@@ -29,7 +21,7 @@
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -40,7 +32,7 @@
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
- dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@@ -83,6 +75,7 @@
return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
+ msm_dsi->id = -1;
msm_dsi->pdev = pdev;
platform_set_drvdata(pdev, msm_dsi);
@@ -117,8 +110,13 @@
DBG("");
msm_dsi = dsi_init(pdev);
- if (IS_ERR(msm_dsi))
- return PTR_ERR(msm_dsi);
+ if (IS_ERR(msm_dsi)) {
+ /* Don't fail the bind if the dsi port is not connected */
+ if (PTR_ERR(msm_dsi) == -ENODEV)
+ return 0;
+ else
+ return PTR_ERR(msm_dsi);
+ }
priv->dsi[msm_dsi->id] = msm_dsi;
@@ -204,7 +202,7 @@
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
- dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
@@ -216,7 +214,7 @@
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
- dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
@@ -238,12 +236,14 @@
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
}
+ msm_dsi_manager_setup_encoder(msm_dsi->id);
+
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 08f3fc6..0da8a4e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DSI_CONNECTOR_H__
@@ -79,7 +71,6 @@
*/
struct drm_panel *panel;
struct drm_bridge *external_bridge;
- unsigned long device_flags;
struct device *phy_dev;
bool phy_enabled;
@@ -97,7 +88,7 @@
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
-void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
+void msm_dsi_manager_setup_encoder(int id);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
bool msm_dsi_manager_validate_current_config(u8 id);
@@ -168,9 +159,9 @@
bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
- struct drm_display_mode *mode);
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
- unsigned long *panel_flags);
+ const struct drm_display_mode *mode);
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index dcdfb1b..b7b7c1a 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dsi_cfg.h"
@@ -118,6 +110,25 @@
.num_dsi = 2,
};
+static const char * const dsi_msm8998_bus_clk_names[] = {
+ "iface", "bus", "core",
+};
+
+static const struct msm_dsi_config msm8998_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 2,
+ .regs = {
+ {"vdd", 367000, 16 }, /* 0.9 V */
+ {"vdda", 62800, 2 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_msm8998_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
+ .io_start = { 0xc994000, 0xc996000 },
+ .num_dsi = 2,
+};
+
static const char * const dsi_sdm845_bus_clk_names[] = {
"iface", "bus",
};
@@ -186,6 +197,8 @@
&msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
&msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
+ &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
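
The new msm8998_dsi_cfg entry above is selected by the (major, minor) version pair read back from the DSI controller. A reduced sketch of how such a table lookup works, assuming it sits next to the tables in dsi_cfg.c; the struct and function names here are made up rather than the driver's internal ones:

#include <linux/kernel.h>
#include "dsi_cfg.h"

struct example_cfg_handler {
	u32 major;
	u32 minor;
	const struct msm_dsi_config *cfg;
};

static const struct example_cfg_handler example_handlers[] = {
	{ MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, &msm8998_dsi_cfg },
	{ MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg },
};

static const struct msm_dsi_config *example_cfg_get(u32 major, u32 minor)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_handlers); i++)
		if (example_handlers[i].major == major &&
		    example_handlers[i].minor == minor)
			return example_handlers[i].cfg;

	return NULL;	/* unknown DSI host revision */
}
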
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 16c5079..e2b7a7d 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MSM_DSI_CFG_H__
@@ -25,6 +17,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 96fb5f6..1e7b1be 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,31 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/of_graph.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
+
#include <video/mipi_display.h>
#include "dsi.h"
@@ -34,6 +26,8 @@
#include "dsi_cfg.h"
#include "msm_kms.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -429,15 +423,15 @@
}
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
- if (!msm_host->byte_clk_src) {
- ret = -ENODEV;
+ if (IS_ERR(msm_host->byte_clk_src)) {
+ ret = PTR_ERR(msm_host->byte_clk_src);
pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
- if (!msm_host->pixel_clk_src) {
- ret = -ENODEV;
+ if (IS_ERR(msm_host->pixel_clk_src)) {
+ ret = PTR_ERR(msm_host->pixel_clk_src);
pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
@@ -994,7 +988,7 @@
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@@ -1049,8 +1043,8 @@
ret = wait_for_completion_timeout(&msm_host->video_comp,
msecs_to_jiffies(70));
- if (ret <= 0)
- dev_err(dev, "wait for video done timed out\n");
+ if (ret == 0)
+ DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -1083,6 +1077,8 @@
return PTR_ERR(data);
}
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@@ -1118,7 +1114,7 @@
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
@@ -1248,7 +1244,7 @@
if (!dma_base)
return -EINVAL;
- return msm_gem_get_iova(msm_host->tx_gem_obj,
+ return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
@@ -1402,7 +1398,7 @@
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
@@ -1596,8 +1592,6 @@
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);
-
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
@@ -1673,7 +1667,7 @@
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
- dev_dbg(dev,
+ DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}
@@ -1681,7 +1675,7 @@
num_lanes = len / sizeof(u32);
if (num_lanes < 1 || num_lanes > 4) {
- dev_err(dev, "bad number of data lanes\n");
+ DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}
@@ -1690,7 +1684,7 @@
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
- dev_err(dev, "failed to read lane data\n");
+ DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
@@ -1711,7 +1705,7 @@
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
- dev_err(dev, "bad physical lane entry %u\n",
+ DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
@@ -1742,21 +1736,23 @@
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
- dev_dbg(dev, "%s: no endpoint\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
- dev_err(dev, "%s: invalid lane configuration %d\n",
+ DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
+ ret = -EINVAL;
goto err;
}
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
- dev_dbg(dev, "%s: no valid device\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
+ ret = -ENODEV;
goto err;
}
@@ -1766,7 +1762,7 @@
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
- dev_err(dev, "%s: failed to get sfpb regmap\n",
+ DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@@ -1916,7 +1912,7 @@
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}
@@ -1924,7 +1920,7 @@
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
@@ -2420,7 +2416,7 @@
}
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2438,17 +2434,14 @@
return 0;
}
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
- unsigned long *panel_flags)
+struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
{
- struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct drm_panel *panel;
+ return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
+}
- panel = of_drm_find_panel(msm_host->device_node);
- if (panel_flags)
- *panel_flags = msm_host->mode_flags;
-
- return panel;
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
+{
+ return to_msm_dsi_host(host)->mode_flags;
}
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
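
Two hunks above replace the write barrier between asserting and de-asserting REG_DSI_RESET with a 20 ms sleep. Pieced together, the reset toggle now looks like the sketch below, reusing the DSI_RESET_TOGGLE_DELAY_MS constant added in this patch; dsi_write() is the driver's file-local register accessor, and the helper name is illustrative:

static void example_dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	/* assert reset, give the block time to latch it, then release */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	msleep(DSI_RESET_TOGGLE_DELAY_MS);
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
}
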
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 5224010..271aa7b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "msm_kms.h"
@@ -233,63 +225,79 @@
return dsi_bridge->id;
}
+static bool dsi_mgr_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+ return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
+void msm_dsi_manager_setup_encoder(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct drm_encoder *encoder = msm_dsi_get_encoder(msm_dsi);
+
+ if (encoder && kms->funcs->set_encoder_mode)
+ kms->funcs->set_encoder_mode(kms, encoder,
+ dsi_mgr_is_cmd_mode(msm_dsi));
+}
+
+static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
+{
+ struct msm_drm_private *priv = conn->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_dsi *master_dsi, *slave_dsi;
+ struct drm_panel *panel;
+
+ if (IS_DUAL_DSI() && !IS_MASTER_DSI_LINK(id)) {
+ master_dsi = other_dsi;
+ slave_dsi = msm_dsi;
+ } else {
+ master_dsi = msm_dsi;
+ slave_dsi = other_dsi;
+ }
+
+ /*
+ * There is only 1 panel in the global panel list for dual DSI mode.
+ * Therefore slave dsi should get the drm_panel instance from master
+ * dsi.
+ */
+ panel = msm_dsi_host_get_panel(master_dsi->host);
+ if (IS_ERR(panel)) {
+ DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
+ PTR_ERR(panel));
+ return PTR_ERR(panel);
+ }
+
+ if (!panel || !IS_DUAL_DSI())
+ goto out;
+
+ drm_object_attach_property(&conn->base,
+ conn->dev->mode_config.tile_property, 0);
+
+ /*
+ * Set split display info to kms once dual DSI panel is connected to
+ * both hosts.
+ */
+ if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
+ kms->funcs->set_split_display(kms, master_dsi->encoder,
+ slave_dsi->encoder,
+ dsi_mgr_is_cmd_mode(msm_dsi));
+ }
+
+out:
+ msm_dsi->panel = panel;
+ return 0;
+}
+
static enum drm_connector_status dsi_mgr_connector_detect(
struct drm_connector *connector, bool force)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
-
- DBG("id=%d", id);
- if (!msm_dsi->panel) {
- msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
- &msm_dsi->device_flags);
-
- /* There is only 1 panel in the global panel list
- * for dual DSI mode. Therefore slave dsi should get
- * the drm_panel instance from master dsi, and
- * keep using the panel flags got from the current DSI link.
- */
- if (!msm_dsi->panel && IS_DUAL_DSI() &&
- !IS_MASTER_DSI_LINK(id) && other_dsi)
- msm_dsi->panel = msm_dsi_host_get_panel(
- other_dsi->host, NULL);
-
-
- if (msm_dsi->panel && kms->funcs->set_encoder_mode) {
- bool cmd_mode = !(msm_dsi->device_flags &
- MIPI_DSI_MODE_VIDEO);
- struct drm_encoder *encoder =
- msm_dsi_get_encoder(msm_dsi);
-
- kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
- }
-
- if (msm_dsi->panel && IS_DUAL_DSI())
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.tile_property, 0);
-
- /* Set split display info to kms once dual DSI panel is
- * connected to both hosts.
- */
- if (msm_dsi->panel && IS_DUAL_DSI() &&
- other_dsi && other_dsi->panel) {
- bool cmd_mode = !(msm_dsi->device_flags &
- MIPI_DSI_MODE_VIDEO);
- struct drm_encoder *encoder = msm_dsi_get_encoder(
- dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
- struct drm_encoder *slave_enc = msm_dsi_get_encoder(
- dsi_mgr_get_dsi(DSI_ENCODER_SLAVE));
-
- if (kms->funcs->set_split_display)
- kms->funcs->set_split_display(kms, encoder,
- slave_enc, cmd_mode);
- else
- pr_err("mdp does not support dual DSI\n");
- }
- }
return msm_dsi->panel ? connector_status_connected :
connector_status_disconnected;
@@ -527,8 +535,8 @@
}
static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -536,14 +544,7 @@
struct mipi_dsi_host *host = msm_dsi->host;
bool is_dual_dsi = IS_DUAL_DSI();
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
@@ -610,7 +611,17 @@
drm_connector_attach_encoder(connector, msm_dsi->encoder);
+ ret = msm_dsi_manager_panel_init(connector, id);
+ if (ret) {
+ DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
+ goto fail;
+ }
+
return connector;
+
+fail:
+ connector->funcs->destroy(msm_dsi->connector);
+ return ERR_PTR(ret);
}
bool msm_dsi_manager_validate_current_config(u8 id)
@@ -766,35 +777,6 @@
return true;
}
-void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_device *dev = msm_dsi->dev;
- struct msm_drm_private *priv;
- struct msm_kms *kms;
- struct drm_encoder *encoder;
- bool cmd_mode;
-
- /*
- * drm_device pointer is assigned to msm_dsi only in the modeset_init
- * path. If mipi_dsi_attach() happens in DSI driver's probe path
- * (generally the case when we're connected to a drm_panel of the type
- * mipi_dsi_device), this would be NULL. In such cases, try to set the
- * encoder mode in the DSI connector's detect() op.
- */
- if (!dev)
- return;
-
- priv = dev->dev_private;
- kms = priv->kms;
- encoder = msm_dsi_get_encoder(msm_dsi);
- cmd_mode = !(device_flags &
- MIPI_DSI_MODE_VIDEO);
-
- if (encoder && kms->funcs->set_encoder_mode)
- kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
-}
-
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
{
struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
@@ -839,6 +821,8 @@
if (msm_dsi->host)
msm_dsi_host_unregister(msm_dsi->host);
- msm_dsim->dsi[msm_dsi->id] = NULL;
+
+ if (msm_dsi->id >= 0)
+ msm_dsim->dsi[msm_dsi->id] = NULL;
}
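
With the detect-time setup above folded into msm_dsi_manager_panel_init(), the panel handle is validated with IS_ERR() once at connector creation, and the command/video decision comes from the host mode flags rather than a cached device_flags field. A minimal caller-side sketch using only the helpers exported in dsi.h; the wrapper names are made up:

static bool example_host_is_cmd_mode(struct mipi_dsi_host *host)
{
	/* mode flags are now queried separately from the panel lookup */
	return !(msm_dsi_host_get_mode_flags(host) & MIPI_DSI_MODE_VIDEO);
}

static int example_cache_panel(struct msm_dsi *msm_dsi,
			       struct mipi_dsi_host *master_host)
{
	struct drm_panel *panel = msm_dsi_host_get_panel(master_host);

	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* e.g. probe deferral */

	msm_dsi->panel = panel;
	return 0;
}
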
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9a9fa0c..3522863 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
@@ -404,8 +396,12 @@
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
+ if (ret != -EPROBE_DEFER) {
+ DRM_DEV_ERROR(dev,
+ "%s: failed to init regulator, ret=%d\n",
+ __func__, ret);
+ }
+
return ret;
}
@@ -441,7 +437,7 @@
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
@@ -451,7 +447,7 @@
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
+ DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
@@ -472,7 +468,7 @@
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@@ -507,6 +503,8 @@
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
.data = &dsi_phy_10nm_cfgs },
+ { .compatible = "qcom,dsi-phy-10nm-8998",
+ .data = &dsi_phy_10nm_8998_cfgs },
#endif
{}
};
@@ -543,7 +541,7 @@
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
@@ -574,7 +572,7 @@
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
- dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+ DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
@@ -584,20 +582,18 @@
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
- if (ret) {
- dev_err(dev, "%s: failed to init regulator\n", __func__);
+ if (ret)
goto fail;
- }
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
- dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -616,10 +612,12 @@
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
- if (IS_ERR_OR_NULL(phy->pll))
- dev_info(dev,
+ if (IS_ERR_OR_NULL(phy->pll)) {
+ DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
+ phy->pll = NULL;
+ }
dsi_phy_disable_resource(phy);
@@ -675,21 +673,21 @@
ret = dsi_phy_enable_resource(phy);
if (ret) {
- dev_err(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(dev, "%s: regulator enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
- dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
@@ -702,7 +700,7 @@
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
- dev_err(dev, "%s: failed to restore pll state, %d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index a24ab80..c4069ce 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DSI_PHY_H__
@@ -21,6 +13,9 @@
#define dsi_phy_read(offset) msm_readl((offset))
#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+/* v3.0.0 10nm implementation that requires the old timings settings */
+#define V3_0_0_10NM_OLD_TIMINGS_QUIRK BIT(0)
+
struct msm_dsi_phy_ops {
int (*init) (struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
@@ -41,6 +36,7 @@
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
const resource_size_t io_start[DSI_MAX];
const int num_dsi_phy;
+ const int quirks;
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
@@ -49,6 +45,7 @@
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index b3fffc8..47403d4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -42,6 +42,9 @@
u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
void __iomem *lane_base = phy->lane_base;
+ if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
+ tx_dctrl[3] = 0x02;
+
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
@@ -74,9 +77,11 @@
tx_dctrl[i]);
}
- /* Toggle BIT 0 to release freeze I/0 */
- dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
- dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+ if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) {
+ /* Toggle BIT 0 to release freeze I/0 */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+ }
}
static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
@@ -93,7 +98,7 @@
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -172,7 +177,7 @@
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -196,7 +201,7 @@
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
@@ -221,3 +226,22 @@
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
+ .type = MSM_DSI_PHY_10NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .init = dsi_10nm_phy_init,
+ },
+ .io_start = { 0xc994400, 0xc996400 },
+ .num_dsi_phy = 2,
+ .quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,
+};
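
The 8998 PHY entry above reuses the 10 nm ops but sets V3_0_0_10NM_OLD_TIMINGS_QUIRK, the bit added to msm_dsi_phy_cfg in dsi_phy.h. A minimal sketch of how a config quirk bit gates a code path; the helper name is hypothetical:

static bool example_phy_has_old_timings(const struct msm_dsi_phy *phy)
{
	/* quirks is a plain bitmask on the per-SoC PHY config */
	return phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK;
}
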
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 513f423..1594f14 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1,16 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/delay.h>
+
#include "dsi_phy.h"
#include "dsi.xml.h"
@@ -64,7 +58,7 @@
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -115,7 +109,7 @@
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -142,7 +136,7 @@
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69..1afb7c5 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dsi_phy.h"
@@ -82,7 +74,7 @@
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 4972b52..b3f678f 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dsi_phy.h"
@@ -76,7 +68,7 @@
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 3980044..f225833 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -1,16 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/delay.h>
+
#include "dsi_phy.h"
#include "dsi.xml.h"
@@ -132,7 +126,7 @@
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 613e206..4a4aa3c 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "dsi_pll.h"
@@ -175,7 +167,7 @@
}
if (IS_ERR(pll)) {
- dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 8b32271..c6a3623 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -1,21 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DSI_PLL_H__
#define __DSI_PLL_H__
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include "dsi.h"
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 41bec57..8f6100d 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
- * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@@ -104,8 +104,13 @@
struct dsi_pll_regs reg_setup;
/* private clocks: */
- struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
- u32 num_hws;
+ struct clk_hw *out_div_clk_hw;
+ struct clk_hw *bit_clk_hw;
+ struct clk_hw *byte_clk_hw;
+ struct clk_hw *by_2_bit_clk_hw;
+ struct clk_hw *post_out_div_clk_hw;
+ struct clk_hw *pclk_mux_hw;
+ struct clk_hw *out_dsiclk_hw;
/* clock-provider: */
struct clk_hw_onecell_data *hw_data;
@@ -617,8 +622,19 @@
static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct device *dev = &pll_10nm->pdev->dev;
DBG("DSI PLL%d", pll_10nm->id);
+ of_clk_del_provider(dev->of_node);
+
+ clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+ clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+ clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+ clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+ clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+ clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+ clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+ clk_hw_unregister(&pll_10nm->base.clk_hw);
}
/*
@@ -639,10 +655,8 @@
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->pdev->dev;
- struct clk_hw **hws = pll_10nm->hws;
struct clk_hw_onecell_data *hw_data;
struct clk_hw *hw;
- int num = 0;
int ret;
DBG("DSI%d", pll_10nm->id);
@@ -660,8 +674,6 @@
if (ret)
return ret;
- hws[num++] = &pll_10nm->base.clk_hw;
-
snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
@@ -670,10 +682,12 @@
pll_10nm->mmio +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_base_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
@@ -685,21 +699,25 @@
REG_DSI_10nm_PHY_CMN_CLK_CFG0,
0, 4, CLK_DIVIDER_ONE_BASED,
&pll_10nm->postdiv_lock);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_out_div_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->bit_clk_hw = hw;
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
CLK_SET_RATE_PARENT, 1, 8);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_bit_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->byte_clk_hw = hw;
hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
@@ -707,20 +725,24 @@
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_byte_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->by_2_bit_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
0, 1, 4);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_by_2_bit_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->post_out_div_clk_hw = hw;
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
@@ -734,12 +756,14 @@
}, 4, 0, pll_10nm->phy_cmn_mmio +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_post_out_div_clk_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->pclk_mux_hw = hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -748,25 +772,44 @@
REG_DSI_10nm_PHY_CMN_CLK_CFG0,
4, 4, CLK_DIVIDER_ONE_BASED,
&pll_10nm->postdiv_lock);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_pclk_mux_hw;
+ }
- hws[num++] = hw;
+ pll_10nm->out_dsiclk_hw = hw;
hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
- pll_10nm->num_hws = num;
-
hw_data->num = NUM_PROVIDED_CLKS;
pll_10nm->hw_data = hw_data;
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_10nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
- return ret;
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+ goto err_dsiclk_hw;
}
return 0;
+
+err_dsiclk_hw:
+ clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+ clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+ clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
+err_bit_clk_hw:
+ clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
+err_out_div_clk_hw:
+ clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
+err_base_clk_hw:
+ clk_hw_unregister(&pll_10nm->base.clk_hw);
+
+ return ret;
}
struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
@@ -775,9 +818,6 @@
struct msm_dsi_pll *pll;
int ret;
- if (!pdev)
- return ERR_PTR(-ENODEV);
-
pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
if (!pll_10nm)
return ERR_PTR(-ENOMEM);
@@ -790,13 +830,13 @@
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -815,7 +855,7 @@
ret = pll_10nm_register(pll_10nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
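
The registration rework above replaces the flat hws[] array with named clk_hw pointers so that both the new error path and dsi_pll_10nm_destroy() can unregister each clock individually, in reverse order of registration. A reduced two-clock sketch of the goto-unwind pattern, with made-up clock names:

#include <linux/err.h>
#include <linux/clk-provider.h>

static int example_register_clks(struct device *dev,
				 struct clk_hw **byte_hw,
				 struct clk_hw **by_2_hw)
{
	struct clk_hw *hw;
	int ret;

	hw = clk_hw_register_fixed_factor(dev, "example_byte", "example_bit",
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	*byte_hw = hw;

	hw = clk_hw_register_fixed_factor(dev, "example_by_2", "example_byte",
					  0, 1, 2);
	if (IS_ERR(hw)) {
		ret = PTR_ERR(hw);
		goto err_byte;	/* undo everything registered so far */
	}
	*by_2_hw = hw;

	return 0;

err_byte:
	clk_hw_unregister_fixed_factor(*byte_hw);
	return ret;
}
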
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index 71fe60e..f847376 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -783,7 +775,7 @@
POLL_TIMEOUT_US);
if (unlikely(!locked))
- dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -829,7 +821,7 @@
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_14nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -1039,7 +1031,7 @@
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_14nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -1067,13 +1059,13 @@
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -1096,7 +1088,7 @@
ret = pll_14nm_register(pll_14nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 26e3a01..8c99e01 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -156,7 +148,7 @@
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
- dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
@@ -386,7 +378,7 @@
}
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
@@ -429,7 +421,7 @@
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -468,7 +460,7 @@
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -581,7 +573,7 @@
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -607,7 +599,7 @@
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -633,13 +625,13 @@
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
} else {
- dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+ DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
return ERR_PTR(-EINVAL);
}
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 4900845..a6e7a25 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
@@ -327,7 +319,7 @@
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -368,7 +360,7 @@
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -482,7 +474,7 @@
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -508,7 +500,7 @@
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -526,7 +518,7 @@
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84..0f312ac 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/of_irq.h>
@@ -157,7 +149,7 @@
edp->bridge = msm_edp_bridge_init(edp);
if (IS_ERR(edp->bridge)) {
ret = PTR_ERR(edp->bridge);
- dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
edp->bridge = NULL;
goto fail;
}
@@ -165,7 +157,7 @@
edp->connector = msm_edp_connector_init(edp);
if (IS_ERR(edp->connector)) {
ret = PTR_ERR(edp->connector);
- dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
edp->connector = NULL;
goto fail;
}
@@ -173,7 +165,7 @@
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (edp->irq < 0) {
ret = edp->irq;
- dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
goto fail;
}
@@ -181,7 +173,7 @@
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"edp_isr", edp);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
edp->irq, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index e0f5818..f2c1785 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __EDP_CONNECTOR_H__
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
index 82789dd..df10a01 100644
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ b/drivers/gpu/drm/msm/edp/edp_aux.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "edp.h"
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index 931a5c9..2950bba 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "edp.h"
@@ -52,22 +44,15 @@
}
static void edp_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = bridge->dev;
struct drm_connector *connector;
struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
struct msm_edp *edp = edp_bridge->edp;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if ((connector->encoder != NULL) &&
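
The mode_set change above drops the hand-rolled mode dump in favour of the DRM_MODE_FMT / DRM_MODE_ARG helpers from <drm/drm_modes.h>, which print every field of a drm_display_mode in one go. A minimal illustrative use, in a hypothetical helper rather than the driver's DBG macro:

#include <drm/drm_modes.h>
#include <drm/drm_print.h>

/* Hypothetical helper: log a mode with the core formatting macros. */
static void example_log_mode(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("set mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}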
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 058ff92..73cb5fd 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "drm/drm_edid.h"
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7c72264..7f3dd3f 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c
index 36bb893..fcaf7b7 100644
--- a/drivers/gpu/drm/msm/edp/edp_phy.c
+++ b/drivers/gpu/drm/msm/edp/edp_phy.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "edp.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 33e083f..355afb9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/of_irq.h>
@@ -98,7 +87,7 @@
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -109,7 +98,7 @@
of_node_put(phy_node);
if (!phy_pdev || !hdmi->phy) {
- dev_err(&pdev->dev, "phy driver is not ready\n");
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
@@ -153,7 +142,7 @@
hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
if (IS_ERR(hdmi->qfprom_mmio)) {
- dev_info(&pdev->dev, "can't find qfprom resource\n");
+ DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
@@ -172,7 +161,7 @@
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -195,7 +184,7 @@
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -217,7 +206,7 @@
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -239,7 +228,7 @@
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -254,14 +243,14 @@
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
ret = msm_hdmi_get_phy(hdmi);
if (ret) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
goto fail;
}
@@ -303,7 +292,7 @@
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
- dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
@@ -311,7 +300,7 @@
hdmi->connector = msm_hdmi_connector_init(hdmi);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
- dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
hdmi->connector = NULL;
goto fail;
}
@@ -319,7 +308,7 @@
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -327,7 +316,7 @@
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
@@ -436,38 +425,6 @@
{ "qcom,hdmi-tx-mux-lpm", true, 1, "HDMI_MUX_LPM" },
};
-static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
-{
- int gpio;
-
- /* try with the gpio names as in the table (downstream bindings) */
- gpio = of_get_named_gpio(of_node, name, 0);
- if (gpio < 0) {
- char name2[32];
-
- /* try with the gpio names as in the upstream bindings */
- snprintf(name2, sizeof(name2), "%s-gpios", name);
- gpio = of_get_named_gpio(of_node, name2, 0);
- if (gpio < 0) {
- char name3[32];
-
- /*
- * try again after stripping out the "qcom,hdmi-tx"
- * prefix. This is mainly to match "hpd-gpios" used
- * in the upstream bindings
- */
- if (sscanf(name2, "qcom,hdmi-tx-%s", name3))
- gpio = of_get_named_gpio(of_node, name3, 0);
- }
-
- if (gpio < 0) {
- DBG("failed to get gpio: %s (%d)", name, gpio);
- gpio = -1;
- }
- }
- return gpio;
-}
-
/*
* HDMI audio codec callbacks
*/
@@ -482,7 +439,7 @@
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
- dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
@@ -533,7 +490,7 @@
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
- dev_err(dev, "rate[%d] not supported!\n",
+ DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
@@ -577,7 +534,7 @@
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;
@@ -585,7 +542,7 @@
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
- dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
+ DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
return -ENXIO;
}
@@ -593,11 +550,39 @@
hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
- hdmi_cfg->gpios[i].num = msm_hdmi_get_gpio(of_node,
- msm_hdmi_gpio_pdata[i].name);
+ const char *name = msm_hdmi_gpio_pdata[i].name;
+ struct gpio_desc *gpiod;
+
+ /*
+ * We are fetching the GPIO lines "as is" since the connector
+ * code is enabling and disabling the lines. Until that point
+ * the power-on default value will be kept.
+ */
+ gpiod = devm_gpiod_get_optional(dev, name, GPIOD_ASIS);
+ /* This will catch e.g. -EPROBE_DEFER */
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (!gpiod) {
+ /* Try a second time, stripping down the name */
+ char name3[32];
+
+ /*
+ * Try again after stripping out the "qcom,hdmi-tx"
+ * prefix. This is mainly to match "hpd-gpios" used
+ * in the upstream bindings.
+ */
+ if (sscanf(name, "qcom,hdmi-tx-%s", name3))
+ gpiod = devm_gpiod_get_optional(dev, name3, GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ if (!gpiod)
+ DBG("failed to get gpio: %s", name);
+ }
+ hdmi_cfg->gpios[i].gpiod = gpiod;
+ if (gpiod)
+ gpiod_set_consumer_name(gpiod, msm_hdmi_gpio_pdata[i].label);
hdmi_cfg->gpios[i].output = msm_hdmi_gpio_pdata[i].output;
hdmi_cfg->gpios[i].value = msm_hdmi_gpio_pdata[i].value;
- hdmi_cfg->gpios[i].label = msm_hdmi_gpio_pdata[i].label;
}
dev->platform_data = hdmi_cfg;
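
The bind hunk above replaces the legacy GPIO-number lookup with GPIO descriptors: each line is fetched with devm_gpiod_get_optional() and GPIOD_ASIS so the power-on state is preserved until the connector code configures it, and a shortened property name is tried when the full downstream name finds nothing. A minimal sketch of that lookup-with-fallback pattern, using hypothetical property and consumer names rather than the driver's tables:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical sketch: try the downstream-style name first, then a shorter
 * upstream-style name, leaving the line unconfigured (GPIOD_ASIS).
 */
static struct gpio_desc *example_get_hpd_gpiod(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "qcom,hdmi-tx-hpd", GPIOD_ASIS);
	if (IS_ERR(gpiod))
		return gpiod;		/* e.g. -EPROBE_DEFER */
	if (!gpiod)
		gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_ASIS);
	if (!IS_ERR_OR_NULL(gpiod))
		gpiod_set_consumer_name(gpiod, "HDMI_HPD");

	return gpiod;
}

Because the descriptors are devm-managed, no explicit free is needed later, which is why the connector code further down can drop its gpio_request()/gpio_free() calls.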
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 5c5df6a..bdac452 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __HDMI_CONNECTOR_H__
@@ -22,6 +11,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
#include "msm_drv.h"
@@ -33,10 +23,9 @@
struct hdmi_platform_config;
struct hdmi_gpio_data {
- int num;
+ struct gpio_desc *gpiod;
bool output;
int value;
- const char *label;
};
struct hdmi_audio {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 9c34b91..4c2058c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/hdmi.h>
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7e35707..ba81338 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -1,20 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
+
#include "hdmi.h"
struct hdmi_bridge {
@@ -40,7 +31,7 @@
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -49,7 +40,7 @@
DBG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
@@ -57,7 +48,7 @@
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
@@ -82,7 +73,7 @@
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -101,11 +92,12 @@
u32 val;
int len;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ hdmi->connector, mode);
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
- dev_err(&hdmi->pdev->dev,
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
return;
}
@@ -207,8 +199,8 @@
}
static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = hdmi_bridge->hdmi;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 30e908d..839822d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -1,21 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include "msm_kms.h"
@@ -79,30 +69,21 @@
static int gpio_config(struct hdmi *hdmi, bool on)
{
- struct device *dev = &hdmi->pdev->dev;
const struct hdmi_platform_config *config = hdmi->config;
- int ret, i;
+ int i;
if (on) {
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
- if (gpio.num != -1) {
- ret = gpio_request(gpio.num, gpio.label);
- if (ret) {
- dev_err(dev,
- "'%s'(%d) gpio_request failed: %d\n",
- gpio.label, gpio.num, ret);
- goto err;
- }
-
+ if (gpio.gpiod) {
if (gpio.output) {
- gpio_direction_output(gpio.num,
- gpio.value);
+ gpiod_direction_output(gpio.gpiod,
+ gpio.value);
} else {
- gpio_direction_input(gpio.num);
- gpio_set_value_cansleep(gpio.num,
- gpio.value);
+ gpiod_direction_input(gpio.gpiod);
+ gpiod_set_value_cansleep(gpio.gpiod,
+ gpio.value);
}
}
}
@@ -112,29 +93,20 @@
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
- if (gpio.num == -1)
+ if (!gpio.gpiod)
continue;
if (gpio.output) {
int value = gpio.value ? 0 : 1;
- gpio_set_value_cansleep(gpio.num, value);
+ gpiod_set_value_cansleep(gpio.gpiod, value);
}
-
- gpio_free(gpio.num);
};
DBG("gpio off");
}
return 0;
-err:
- while (i--) {
- if (config->gpios[i].num != -1)
- gpio_free(config->gpios[i].num);
- }
-
- return ret;
}
static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
@@ -156,7 +128,7 @@
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
}
@@ -180,7 +152,7 @@
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
- dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -188,13 +160,13 @@
ret = pinctrl_pm_select_default_state(dev);
if (ret) {
- dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
goto fail;
}
ret = gpio_config(hdmi, true);
if (ret) {
- dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
goto fail;
}
@@ -322,7 +294,7 @@
const struct hdmi_platform_config *config = hdmi->config;
struct hdmi_gpio_data hpd_gpio = config->gpios[HPD_GPIO_INDEX];
- return gpio_get_value(hpd_gpio.num) ?
+ return gpiod_get_value(hpd_gpio.gpiod) ?
connector_status_connected :
connector_status_disconnected;
}
@@ -341,7 +313,7 @@
* some platforms may not have hpd gpio. Rely only on the status
* provided by REG_HDMI_HPD_INT_STATUS in this case.
*/
- if (hpd_gpio.num == -1)
+ if (!hpd_gpio.gpiod)
return detect_reg(hdmi);
do {
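
With descriptors, gpio_config() above no longer requests or frees lines; it only sets direction and value on the lines that exist. A minimal sketch of the descriptor-based calls that replace the old gpio_direction_*/gpio_set_value helpers, for a hypothetical optional line:

#include <linux/gpio/consumer.h>

/* Hypothetical: drive an output line to 'value', or switch it to input. */
static int example_configure_line(struct gpio_desc *gpiod, bool output, int value)
{
	if (!gpiod)
		return 0;	/* optional line not wired up on this board */

	if (output)
		return gpiod_direction_output(gpiod, value);

	return gpiod_direction_input(gpiod);
}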
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 3656155..e774846 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1,14 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include "hdmi.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 73e2021..de182c0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hdmi.h"
@@ -66,7 +55,7 @@
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
if (!retry) {
- dev_err(dev->dev, "timeout waiting for DDC\n");
+ DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 4157722..1697e61 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/of_device.h>
@@ -37,7 +29,7 @@
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
return ret;
}
@@ -51,7 +43,7 @@
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev, "failed to get phy clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
cfg->clk_names[i], ret);
return ret;
}
@@ -73,14 +65,14 @@
for (i = 0; i < cfg->num_regs; i++) {
ret = regulator_enable(phy->regs[i]);
if (ret)
- dev_err(dev, "failed to enable regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
cfg->reg_names[i], ret);
}
for (i = 0; i < cfg->num_clks; i++) {
ret = clk_prepare_enable(phy->clks[i]);
if (ret)
- dev_err(dev, "failed to enable clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
cfg->clk_names[i], ret);
}
@@ -159,7 +151,7 @@
phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
if (IS_ERR(phy->mmio)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;
}
@@ -177,7 +169,7 @@
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
if (ret) {
- dev_err(dev, "couldn't init PLL\n");
+ DRM_DEV_ERROR(dev, "couldn't init PLL\n");
msm_hdmi_phy_resource_disable(phy);
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 0980da8..cf90a0c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hdmi.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 0df504c..a8f3b2c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include "hdmi.h"
@@ -725,7 +718,7 @@
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio_qserdes_com)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -737,7 +730,7 @@
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
}
@@ -745,7 +738,7 @@
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index a68eea4..95f2928 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -1,20 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
+
#include "hdmi.h"
static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 4a8b846..a2a6940 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hdmi.h"
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 9959075..562dfac 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -1,22 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
#include "hdmi.h"
struct hdmi_pll_8960 {
@@ -445,7 +436,7 @@
pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -454,7 +445,7 @@
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 2b7bb6e..5ccfad7 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,91 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- struct msm_drm_private *priv = old_state->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- int i;
-
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->active)
- continue;
-
- if (drm_crtc_vblank_get(crtc))
- continue;
-
- kms->funcs->wait_for_crtc_commit_done(kms, crtc);
-
- drm_crtc_vblank_put(crtc);
- }
-}
-
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
- struct dma_fence *fence;
if (!new_state->fb)
return 0;
- obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
-
- drm_atomic_set_fence_for_plane(new_state, fence);
+ drm_gem_fb_prepare_fb(plane, new_state);
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
+static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
+{
+ unsigned crtc_mask = BIT(crtc_idx);
+
+ trace_msm_atomic_async_commit_start(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ mutex_unlock(&kms->commit_lock);
+ goto out;
+ }
+
+ kms->pending_crtc_mask &= ~crtc_mask;
+
+ kms->funcs->enable_commit(kms);
+
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+ kms->funcs->complete_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+ kms->funcs->disable_commit(kms);
+
+out:
+ trace_msm_atomic_async_commit_finish(crtc_mask);
+}
+
+static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
+{
+ struct msm_pending_timer *timer = container_of(t,
+ struct msm_pending_timer, timer);
+ struct msm_drm_private *priv = timer->kms->dev->dev_private;
+
+ queue_work(priv->wq, &timer->work);
+
+ return HRTIMER_NORESTART;
+}
+
+static void msm_atomic_pending_work(struct work_struct *work)
+{
+ struct msm_pending_timer *timer = container_of(work,
+ struct msm_pending_timer, work);
+
+ msm_atomic_async_commit(timer->kms, timer->crtc_idx);
+}
+
+void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx)
+{
+ timer->kms = kms;
+ timer->crtc_idx = crtc_idx;
+ hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ timer->timer.function = msm_atomic_pending_timer;
+ INIT_WORK(&timer->work, msm_atomic_pending_work);
+}
+
+static bool can_do_async(struct drm_atomic_state *state,
+ struct drm_crtc **async_crtc)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, num_crtcs = 0;
+
+ if (!(state->legacy_cursor_update || state->async_update))
+ return false;
+
+ /* any connector change means slow path: */
+ for_each_new_connector_in_state(state, connector, connector_state, i)
+ return false;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+ if (++num_crtcs > 1)
+ return false;
+ *async_crtc = crtc;
+ }
+
+ return true;
+}
+
+/* Get bitmask of crtcs that will need to be flushed. The bitmask
+ * can be used with the for_each_crtc_mask() iterator to iterate
+ * affected crtcs without needing to preserve the atomic state.
+ */
+static unsigned get_crtc_mask(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned i, mask = 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+ mask |= drm_crtc_mask(crtc);
+
+ return mask;
+}
+
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct drm_crtc *async_crtc = NULL;
+ unsigned crtc_mask = get_crtc_mask(state);
+ bool async = kms->funcs->vsync_time &&
+ can_do_async(state, &async_crtc);
+ trace_msm_atomic_commit_tail_start(async, crtc_mask);
+
+ kms->funcs->enable_commit(kms);
+
+ /*
+ * Ensure any previous (potentially async) commit has
+ * completed:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+
+ /*
+ * Now that there is no in-progress flush, prepare the
+ * current update:
+ */
kms->funcs->prepare_commit(kms, state);
+ /*
+ * Push atomic updates down to hardware:
+ */
drm_atomic_helper_commit_modeset_disables(dev, state);
-
drm_atomic_helper_commit_planes(dev, state, 0);
-
drm_atomic_helper_commit_modeset_enables(dev, state);
- if (kms->funcs->commit) {
- DRM_DEBUG_ATOMIC("triggering commit\n");
- kms->funcs->commit(kms, state);
+ if (async) {
+ struct msm_pending_timer *timer =
+ &kms->pending_timers[drm_crtc_index(async_crtc)];
+
+ /* async updates are limited to single-crtc updates: */
+ WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
+
+ /*
+ * Start timer if we don't already have an update pending
+ * on this crtc:
+ */
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ ktime_t vsync_time, wakeup_time;
+
+ kms->pending_crtc_mask |= crtc_mask;
+
+ vsync_time = kms->funcs->vsync_time(kms, async_crtc);
+ wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
+
+ hrtimer_start(&timer->timer, wakeup_time,
+ HRTIMER_MODE_ABS);
+ }
+
+ kms->funcs->disable_commit(kms);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * At this point, from drm core's perspective, we
+ * are done with the atomic update, so we can just
+ * go ahead and signal that it is done:
+ */
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
+
+ return;
}
- msm_atomic_wait_for_commit_done(dev, state);
+ /*
+ * If there is any async flush pending on updated crtcs, fold
+ * them into the current flush.
+ */
+ kms->pending_crtc_mask &= ~crtc_mask;
- kms->funcs->complete_commit(kms, state);
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ mutex_lock(&kms->commit_lock);
+ kms->funcs->complete_commit(kms, crtc_mask);
+ mutex_unlock(&kms->commit_lock);
+ kms->funcs->disable_commit(kms);
drm_atomic_helper_commit_hw_done(state);
-
drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}
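
The rewritten msm_atomic_commit_tail() above defers qualifying single-crtc updates: instead of flushing immediately, it arms an hrtimer about 1 ms before the next vsync, and the timer queues a work item that performs the flush from process context (msm_atomic_pending_timer / msm_atomic_pending_work / msm_atomic_async_commit). A minimal standalone sketch of that hrtimer-to-workqueue hand-off, with hypothetical names and without the driver's crtc-mask bookkeeping:

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>

struct example_deferred {
	struct hrtimer timer;
	struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
	/* Runs in process context, so it may sleep (take mutexes, wait for HW). */
}

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	struct example_deferred *d = container_of(t, struct example_deferred, timer);

	/* hrtimer callbacks run in hard-irq context: just punt to a worker. */
	schedule_work(&d->work);

	return HRTIMER_NORESTART;
}

static void example_arm(struct example_deferred *d, ktime_t vsync_time)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	d->timer.function = example_timer_fn;
	INIT_WORK(&d->work, example_work_fn);

	/* Wake up roughly 1 ms before the expected vsync, as the driver does. */
	hrtimer_start(&d->timer, ktime_sub(vsync_time, ms_to_ktime(1)),
		      HRTIMER_MODE_ABS);
}

The driver queues onto its own priv->wq via queue_work() rather than schedule_work(), and guards pending_crtc_mask with kms->commit_lock so a later synchronous commit can fold an armed async flush into its own.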
diff --git a/drivers/gpu/drm/msm/msm_atomic_trace.h b/drivers/gpu/drm/msm/msm_atomic_trace.h
new file mode 100644
index 0000000..b4ca0ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_trace.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm_atomic
+#define TRACE_INCLUDE_FILE msm_atomic_trace
+
+TRACE_EVENT(msm_atomic_commit_tail_start,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_commit_tail_finish,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_flush_commit,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
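
The header above defines seven events; five of them differ only in name and carry a single crtc_mask field. For reference, the kernel tracing API can also express such a family through a shared template; a purely illustrative sketch (not part of the patch) of the DECLARE_EVENT_CLASS / DEFINE_EVENT form, which would live in the same kind of trace header:

#include <linux/tracepoint.h>

/* Illustrative only: a shared class for crtc_mask-only events. */
DECLARE_EVENT_CLASS(example_crtc_mask_class,
	TP_PROTO(unsigned crtc_mask),
	TP_ARGS(crtc_mask),
	TP_STRUCT__entry(
		__field(u32, crtc_mask)
	),
	TP_fast_assign(
		__entry->crtc_mask = crtc_mask;
	),
	TP_printk("crtc_mask=%x", __entry->crtc_mask)
);

DEFINE_EVENT(example_crtc_mask_class, example_wait_flush_start,
	TP_PROTO(unsigned crtc_mask),
	TP_ARGS(crtc_mask)
);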
diff --git a/drivers/gpu/drm/msm/msm_atomic_tracepoints.c b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
new file mode 100644
index 0000000..011dc88
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "msm_atomic_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index d756436..6be8795 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -1,22 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef CONFIG_DEBUG_FS
+
#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
@@ -75,7 +69,7 @@
struct msm_gpu_show_priv *show_priv;
int ret;
- if (!gpu)
+ if (!gpu || !gpu->funcs->gpu_state_get)
return -ENODEV;
show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
@@ -201,13 +195,13 @@
ret = msm_rd_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
@@ -235,14 +229,14 @@
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
return ret;
}
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms->funcs->debugfs_init) {
+ if (priv->kms && priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
index f4077e3..2b91f8c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.h
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_DEBUGFS_H__
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c1abad8..c84f0a8 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,31 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-mapping.h>
#include <linux/kthread.h>
+#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_prime.h>
#include <drm/drm_of.h>
+#include <drm/drm_vblank.h>
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
+#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
-
+#include "adreno/adreno_gpu.h"
/*
* MSM driver version:
@@ -35,9 +34,12 @@
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ * GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 3
+#define MSM_VERSION_MINOR 5
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -81,46 +83,6 @@
* Util/helpers:
*/
-int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
-{
- struct property *prop;
- const char *name;
- struct clk_bulk_data *local;
- int i = 0, ret, count;
-
- count = of_property_count_strings(dev->of_node, "clock-names");
- if (count < 1)
- return 0;
-
- local = devm_kcalloc(dev, sizeof(struct clk_bulk_data *),
- count, GFP_KERNEL);
- if (!local)
- return -ENOMEM;
-
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!local[i].id) {
- devm_kfree(dev, local);
- return -ENOMEM;
- }
-
- i++;
- }
-
- ret = devm_clk_bulk_get(dev, count, local);
-
- if (ret) {
- for (i = 0; i < count; i++)
- devm_kfree(dev, (void *) local[i].id);
- devm_kfree(dev, local);
-
- return ret;
- }
-
- *bulk = local;
- return count;
-}
-
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name)
{
@@ -170,7 +132,7 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
@@ -178,7 +140,7 @@
ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
if (!ptr) {
- dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -203,62 +165,44 @@
return val;
}
-struct vblank_event {
- struct list_head node;
+struct msm_vblank_work {
+ struct work_struct work;
int crtc_id;
bool enable;
+ struct msm_drm_private *priv;
};
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
{
- struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
- struct msm_vblank_ctrl, work);
- struct msm_drm_private *priv = container_of(vbl_ctrl,
- struct msm_drm_private, vblank_ctrl);
+ struct msm_vblank_work *vbl_work = container_of(work,
+ struct msm_vblank_work, work);
+ struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
- struct vblank_event *vbl_ev, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ if (vbl_work->enable)
+ kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
- if (vbl_ev->enable)
- kms->funcs->enable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
- else
- kms->funcs->disable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
-
- kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- }
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable)
{
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev;
- unsigned long flags;
+ struct msm_vblank_work *vbl_work;
- vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
- if (!vbl_ev)
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+ if (!vbl_work)
return -ENOMEM;
- vbl_ev->crtc_id = crtc_id;
- vbl_ev->enable = enable;
+ INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ vbl_work->crtc_id = crtc_id;
+ vbl_work->enable = enable;
+ vbl_work->priv = priv;
- kthread_queue_work(&priv->disp_thread[crtc_id].worker,
- &vbl_ctrl->work);
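+ /* hand off to the ordered "msm" workqueue; the worker frees vbl_work */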
+ queue_work(priv->wq, &vbl_work->work);
return 0;
}
@@ -270,31 +214,31 @@
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_mdss *mdss = priv->mdss;
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev, *tmp;
int i;
+ /*
+ * Shut down the hw if we're far enough along that things might be on.
+ * If we run this too early, we'll end up panicking in any number of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (ddev->registered) {
+ drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
+ }
+
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- kthread_flush_work(&vbl_ctrl->work);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- kfree(vbl_ev);
- }
- /* clean up display commit/event worker threads */
+ flush_workqueue(priv->wq);
+
+ /* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->disp_thread[i].thread) {
- kthread_flush_worker(&priv->disp_thread[i].worker);
- kthread_stop(priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
if (priv->event_thread[i].thread) {
- kthread_flush_worker(&priv->event_thread[i].worker);
- kthread_stop(priv->event_thread[i].thread);
+ kthread_destroy_worker(&priv->event_thread[i].worker);
priv->event_thread[i].thread = NULL;
}
}
@@ -303,8 +247,6 @@
drm_kms_helper_poll_fini(ddev);
- drm_dev_unregister(ddev);
-
msm_perf_debugfs_cleanup(priv);
msm_rd_debugfs_cleanup(priv);
@@ -312,15 +254,13 @@
if (fbdev && priv->fbdev)
msm_fbdev_free(ddev);
#endif
+
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
drm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -337,8 +277,9 @@
mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
+ destroy_workqueue(priv->wq);
kfree(priv);
return 0;
@@ -357,6 +298,14 @@
#include <linux/of_address.h>
+bool msm_use_mmu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* a2xx comes with its own MMU */
+ return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -395,7 +344,7 @@
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
- } else if (!iommu_present(&platform_bus_type)) {
+ } else if (!msm_use_mmu(dev)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
@@ -418,12 +367,12 @@
p = dma_alloc_attrs(dev->dev, size,
&priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
- dev_err(dev->dev, "failed to allocate VRAM\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
return -ENOMEM;
}
- dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
(uint32_t)priv->vram.paddr,
(uint32_t)(priv->vram.paddr + size));
}
@@ -443,7 +392,7 @@
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
- dev_err(dev, "failed to allocate drm_device\n");
+ DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
@@ -452,7 +401,7 @@
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- goto err_unref_drm_dev;
+ goto err_put_drm_dev;
}
ddev->dev_private = priv;
@@ -476,10 +425,10 @@
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_WORK(&priv->free_work, msm_gem_free_work);
+ init_llist_head(&priv->free_list);
+
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
- spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
@@ -507,19 +456,16 @@
priv->kms = kms;
break;
default:
- kms = ERR_PTR(-ENODEV);
+ /* valid only for the dummy headless case, where of_node=NULL */
+ WARN_ON(dev->of_node);
+ kms = NULL;
break;
}
if (IS_ERR(kms)) {
- /*
- * NOTE: once we have GPU support, having no kms should not
- * be considered fatal.. ideally we would still support gpu
- * and (for example) use dmabuf/prime to share buffers with
- * imx drm driver on iMX5
- */
- dev_err(dev, "failed to load kms\n");
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
+ priv->kms = NULL;
goto err_msm_uninit;
}
@@ -527,9 +473,10 @@
ddev->mode_config.normalize_zpos = true;
if (kms) {
+ kms->dev = ddev;
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev, "kms hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
@@ -544,26 +491,6 @@
*/
param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
-
- /* initialize display thread */
- priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
- kthread_init_worker(&priv->disp_thread[i].worker);
- priv->disp_thread[i].dev = ddev;
- priv->disp_thread[i].thread =
- kthread_run(kthread_worker_fn,
- &priv->disp_thread[i].worker,
- "crtc_commit:%d", priv->disp_thread[i].crtc_id);
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display thread priority update failed: %d\n",
- ret);
-
- if (IS_ERR(priv->disp_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_commit kthread\n");
- priv->disp_thread[i].thread = NULL;
- }
-
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,47 +499,22 @@
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
- /**
- * event thread should also run at same priority as disp_thread
- * because it is handling frame_done events. A lower priority
- * event thread and higher priority disp_thread can causes
- * frame_pending counters beyond 2. This can lead to commit
- * failure at crtc commit level.
- */
- ret = sched_setscheduler(priv->event_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display event thread priority update failed: %d\n",
- ret);
-
if (IS_ERR(priv->event_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_event kthread\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
priv->event_thread[i].thread = NULL;
- }
-
- if ((!priv->disp_thread[i].thread) ||
- !priv->event_thread[i].thread) {
- /* clean up previously created threads if any */
- for ( ; i >= 0; i--) {
- if (priv->disp_thread[i].thread) {
- kthread_stop(
- priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
- if (priv->event_thread[i].thread) {
- kthread_stop(
- priv->event_thread[i].thread);
- priv->event_thread[i].thread = NULL;
- }
- }
goto err_msm_uninit;
}
+
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ dev_warn(dev, "event_thread set priority failed:%d\n",
+ ret);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev, "failed to initialize vblank\n");
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
@@ -621,7 +523,7 @@
ret = drm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
}
@@ -633,7 +535,7 @@
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fbdev)
+ if (kms && fbdev)
priv->fbdev = msm_fbdev_init(ddev);
#endif
@@ -653,8 +555,8 @@
mdss->funcs->destroy(ddev);
err_free_priv:
kfree(priv);
-err_unref_drm_dev:
- drm_dev_unref(ddev);
+err_put_drm_dev:
+ drm_dev_put(ddev);
return ret;
}
@@ -677,6 +579,7 @@
static int context_init(struct drm_device *dev, struct drm_file *file)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -685,6 +588,7 @@
msm_submitqueue_init(dev, ctx);
+ ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
file->driver_priv = ctx;
return 0;
@@ -741,7 +645,11 @@
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
- return kms->funcs->irq_postinstall(kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
@@ -808,7 +716,7 @@
}
return msm_gem_new_handle(dev, file, args->size,
- args->flags, &args->handle);
+ args->flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -866,6 +774,10 @@
if (!priv->gpu)
return -EINVAL;
+ /*
+ * Don't pin the memory here - just get an address so that userspace can
+ * be productive
+ */
return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
@@ -874,23 +786,71 @@
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
- int ret = 0;
+ struct msm_gem_object *msm_obj;
+ int i, ret = 0;
- if (args->flags & ~MSM_INFO_FLAGS)
+ if (args->pad)
return -EINVAL;
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ case MSM_INFO_GET_IOVA:
+ /* value returned as immediate, not pointer, so len==0: */
+ if (args->len)
+ return -EINVAL;
+ break;
+ case MSM_INFO_SET_NAME:
+ case MSM_INFO_GET_NAME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (args->flags & MSM_INFO_IOVA) {
- uint64_t iova;
+ msm_obj = to_msm_bo(obj);
- ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
- if (!ret)
- args->offset = iova;
- } else {
- args->offset = msm_gem_mmap_offset(obj);
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ args->value = msm_gem_mmap_offset(obj);
+ break;
+ case MSM_INFO_GET_IOVA:
+ ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+ break;
+ case MSM_INFO_SET_NAME:
+ /* length check should leave room for terminating null: */
+ if (args->len >= sizeof(msm_obj->name)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+ args->len)) {
+ msm_obj->name[0] = '\0';
+ ret = -EFAULT;
+ break;
+ }
+ msm_obj->name[args->len] = '\0';
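+ /* drop everything from the first non-printable character onward */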
+ for (i = 0; i < args->len; i++) {
+ if (!isprint(msm_obj->name[i])) {
+ msm_obj->name[i] = '\0';
+ break;
+ }
+ }
+ break;
+ case MSM_INFO_GET_NAME:
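+ /* value==0 means the caller only wants the name length back in len */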
+ if (args->value && (args->len < strlen(msm_obj->name))) {
+ ret = -EINVAL;
+ break;
+ }
+ args->len = strlen(msm_obj->name);
+ if (args->value) {
+ if (copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len))
+ ret = -EFAULT;
+ }
+ break;
}
drm_gem_object_put_unlocked(obj);
@@ -978,6 +938,11 @@
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -988,16 +953,17 @@
}
static const struct drm_ioctl_desc msm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1019,9 +985,7 @@
};
static struct drm_driver msm_driver = {
- .driver_features = DRIVER_HAVE_IRQ |
- DRIVER_GEM |
- DRIVER_PRIME |
+ .driver_features = DRIVER_GEM |
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET,
@@ -1034,15 +998,12 @@
.irq_uninstall = msm_irq_uninstall,
.enable_vblank = msm_enable_vblank,
.disable_vblank = msm_disable_vblank,
- .gem_free_object = msm_gem_free_object,
+ .gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -1069,18 +1030,15 @@
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_suspend)
- return kms->funcs->pm_suspend(dev);
-
- drm_kms_helper_poll_disable(ddev);
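+ /* drop any state left over from a suspend that was never resumed */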
+ if (WARN_ON(priv->pm_state))
+ drm_atomic_state_put(priv->pm_state);
priv->pm_state = drm_atomic_helper_suspend(ddev);
if (IS_ERR(priv->pm_state)) {
- drm_kms_helper_poll_enable(ddev);
- return PTR_ERR(priv->pm_state);
+ int ret = PTR_ERR(priv->pm_state);
+ DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+ return ret;
}
return 0;
@@ -1090,16 +1048,16 @@
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
+ int ret;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_resume)
- return kms->funcs->pm_resume(dev);
+ if (WARN_ON(!priv->pm_state))
+ return -ENOENT;
- drm_atomic_helper_resume(ddev, priv->pm_state);
- drm_kms_helper_poll_enable(ddev);
+ ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+ if (!ret)
+ priv->pm_state = NULL;
- return 0;
+ return ret;
}
#endif
@@ -1184,7 +1142,7 @@
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
- dev_err(mdp_dev, "unable to parse port endpoint\n");
+ DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
@@ -1206,8 +1164,10 @@
if (!intf)
continue;
- drm_of_component_match_add(master_dev, matchptr, compare_of,
- intf);
+ if (of_device_is_available(intf))
+ drm_of_component_match_add(master_dev, matchptr,
+ compare_of, intf);
+
of_node_put(intf);
}
@@ -1235,13 +1195,13 @@
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
- dev_err(dev, "failed to populate children devices\n");
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
return ret;
}
mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
if (!mdp_dev) {
- dev_err(dev, "failed to find MDSS MDP node\n");
+ DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
return -ENODEV;
}
@@ -1271,6 +1231,7 @@
static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ { .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
@@ -1284,7 +1245,8 @@
if (!np)
return 0;
- drm_of_component_match_add(dev, matchptr, compare_of, np);
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
@@ -1315,22 +1277,32 @@
struct component_match *match = NULL;
int ret;
- ret = add_display_components(&pdev->dev, &match);
- if (ret)
- return ret;
+ if (get_mdp_ver(pdev)) {
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+ }
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
- return ret;
+ goto fail;
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
*/
ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
if (ret)
- return ret;
+ goto fail;
- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
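+ /* unwind the of_platform_populate() done in add_display_components() */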
+ of_platform_depopulate(&pdev->dev);
+ return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f32..71547e7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_DRV_H__
@@ -33,14 +22,13 @@
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/kthread.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
@@ -68,6 +56,7 @@
rwlock_t queuelock;
struct list_head submitqueues;
int queueid;
+ struct msm_gem_address_space *aspace;
};
enum msm_mdp_plane_property {
@@ -77,12 +66,6 @@
PLANE_PROP_MAX_NUM
};
-struct msm_vblank_ctrl {
- struct kthread_work work;
- struct list_head event_list;
- spinlock_t lock;
-};
-
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -126,7 +109,7 @@
/**
* struct msm_display_info - defines display properties
- * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
@@ -179,6 +162,8 @@
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
+ /* gpu is only set on open(), but we need this info earlier */
+ bool is_a2xx;
struct drm_fb_helper *fbdev;
@@ -189,6 +174,10 @@
/* list of GEM objects: */
struct list_head inactive_list;
+ /* worker for delayed free of objects: */
+ struct work_struct free_work;
+ struct llist_head free_list;
+
struct workqueue_struct *wq;
unsigned int num_planes;
@@ -197,7 +186,6 @@
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
- struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
@@ -226,7 +214,6 @@
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- struct msm_vblank_ctrl vblank_ctrl;
struct drm_atomic_state *pm_state;
};
@@ -234,17 +221,28 @@
uint32_t pixel_format;
};
+struct msm_pending_timer;
+
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
+void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt);
+ struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
@@ -252,9 +250,15 @@
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+bool msm_use_mmu(struct drm_device *dev);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -269,12 +273,14 @@
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -283,7 +289,6 @@
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -301,7 +306,7 @@
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle);
+ uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,8 +317,14 @@
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_free_work(struct work_struct *work);
+
+__printf(2, 3)
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
@@ -376,12 +387,14 @@
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
@@ -389,7 +402,6 @@
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
-int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name);
@@ -404,6 +416,8 @@
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348a..37674e8 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,23 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -35,6 +27,7 @@
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
+ .dirty = drm_atomic_helper_dirtyfb,
};
#ifdef CONFIG_DEBUG_FS
@@ -66,7 +59,7 @@
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -81,7 +74,7 @@
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(fb->obj[i], aspace);
+ msm_gem_unpin_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -106,9 +99,11 @@
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
- int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+ int ret, i, n = info->num_planes;
for (i = 0; i < n; i++) {
bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
@@ -135,26 +130,24 @@
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
int ret, i, n;
- unsigned int hsub, vsub;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- n = drm_format_num_planes(mode_cmd->pixel_format);
- hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
- vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
-
+ n = info->num_planes;
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
- dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
@@ -176,12 +169,12 @@
}
for (i = 0; i < n; i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + width * info->cpp[i]
+ mode_cmd->offsets[i];
if (bos[i]->size < min_size) {
@@ -196,7 +189,7 @@
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
@@ -233,13 +226,15 @@
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
- dev_err(dev->dev, "failed to allocate buffer object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
+ msm_gem_object_set_name(bo, "stolenfb");
+
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b..cff198b 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -1,22 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -91,7 +81,7 @@
sizes->surface_height, pitch, format);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
return PTR_ERR(fb);
}
@@ -104,15 +94,15 @@
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
- dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "failed to allocate fb info\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -122,13 +112,9 @@
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &msm_fb_ops;
- strcpy(fbi->fix.id, "msm");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = paddr;
@@ -176,7 +162,7 @@
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
@@ -184,6 +170,9 @@
if (ret)
goto fini;
+ /* the fw fb could be anywhere in memory */
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
+
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 349c12f..ad27036 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dma-fence.h>
@@ -119,11 +108,6 @@
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
@@ -133,10 +117,7 @@
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
- .enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
struct dma_fence *
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index b9fe059..2d9af66 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_FENCE_H__
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27..5a6a79f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/spinlock.h>
@@ -20,6 +9,8 @@
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
+#include <drm/drm_prime.h>
+
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
@@ -43,6 +34,46 @@
return !msm_obj->vram_node;
}
+/*
+ * Cache sync: this is a bit over-complicated, to fit the dma-mapping
+ * API. The GPU cache is really out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache that, depending on the
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
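+ /*
+ * See the comment above: arm64 with iommu DMA ops only needs a cache
+ * sync, everything else (ab)uses dma_map_sg() purely for its cache
+ * maintenance side effect.
+ */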
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -88,7 +119,7 @@
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n",
+ DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
@@ -99,7 +130,7 @@
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
@@ -108,8 +139,7 @@
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}
return msm_obj->pages;
@@ -138,9 +168,7 @@
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
@@ -280,7 +308,7 @@
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
+ DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
@@ -352,63 +380,110 @@
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+ if (vma->aspace) {
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
+ }
del_vma(vma);
}
}
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
int ret = 0;
- mutex_lock(&msm_obj->lock);
-
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return -EBUSY;
- }
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
vma = lookup_vma(obj, aspace);
if (!vma) {
- struct page **pages;
-
vma = add_vma(obj, aspace);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto unlock;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ if (ret) {
+ del_vma(vma);
+ return ret;
}
-
- ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT);
- if (ret)
- goto fail;
}
*iova = vma->iova;
-
- mutex_unlock(&msm_obj->lock);
return 0;
+}
-fail:
- del_vma(vma);
-unlock:
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+ struct page **pages;
+ int prot = IOMMU_READ;
+
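+ /* map writable unless the BO was allocated MSM_BO_GPU_READONLY */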
+ if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
+
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ return -EBUSY;
+
+ vma = lookup_vma(obj, aspace);
+ if (WARN_ON(!vma))
+ return -EINVAL;
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ return msm_gem_map_vma(aspace, vma, prot,
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ u64 local;
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+
+ ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+ if (!ret)
+ ret = msm_gem_pin_iova(obj, aspace);
+
+ if (!ret)
+ *iova = local;
+
mutex_unlock(&msm_obj->lock);
return ret;
}
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
+ mutex_unlock(&msm_obj->lock);
+
+ return ret;
+}
+
/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
@@ -424,15 +499,24 @@
return vma ? vma->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
+ * to get rid of it.
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ mutex_lock(&msm_obj->lock);
+ vma = lookup_vma(obj, aspace);
+
+ if (!WARN_ON(!vma))
+ msm_gem_unmap_vma(aspace, vma);
+
+ mutex_unlock(&msm_obj->lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -441,7 +525,7 @@
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +557,7 @@
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv > madv)) {
- dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
@@ -618,14 +702,13 @@
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(msm_obj->resv);
+ fobj = dma_resv_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(msm_obj->resv);
+ fence = dma_resv_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -639,7 +722,7 @@
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(msm_obj->resv));
+ dma_resv_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -657,9 +740,9 @@
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(msm_obj->resv, fence);
+ dma_resv_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(msm_obj->resv, fence);
+ dma_resv_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -679,13 +762,12 @@
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ ret = dma_resv_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -708,7 +790,7 @@
struct seq_file *m)
{
if (!dma_fence_is_signaled(fence))
- seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+ seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->seqno);
@@ -717,8 +799,8 @@
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = msm_obj->resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj = obj->resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -739,16 +821,25 @@
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- /* FIXME: we need to print the address space here too */
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " %08llx", vma->iova);
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- seq_printf(m, " %zu%s\n", obj->size, madv);
+ if (!list_empty(&msm_obj->vmas)) {
+
+ seq_puts(m, " vmas:");
+
+ list_for_each_entry(vma, &msm_obj->vmas, list)
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+ vma->aspace != NULL ? vma->aspace->name : NULL,
+ vma->iova, vma->mapped ? "mapped" : "unmapped",
+ vma->inuse);
+
+ seq_puts(m, "\n");
+ }
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -775,9 +866,10 @@
int count = 0;
size_t size = 0;
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
- seq_printf(m, " ");
+ seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
size += obj->size;
@@ -790,8 +882,18 @@
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
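+ /*
+ * Defer the real teardown to free_work (it needs struct_mutex), and
+ * only kick the worker when the llist goes from empty to non-empty.
+ */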
+ if (llist_add(&msm_obj->freed, &priv->free_list))
+ queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+ struct drm_gem_object *obj = &msm_obj->base;
+ struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -820,18 +922,39 @@
put_pages(obj);
}
- if (msm_obj->resv == &msm_obj->_resv)
- reservation_object_fini(msm_obj->resv);
-
drm_gem_object_release(obj);
mutex_unlock(&msm_obj->lock);
kfree(msm_obj);
}
+void msm_gem_free_work(struct work_struct *work)
+{
+ struct msm_drm_private *priv =
+ container_of(work, struct msm_drm_private, free_work);
+ struct drm_device *dev = priv->dev;
+ struct llist_node *freed;
+ struct msm_gem_object *msm_obj, *next;
+
+ while ((freed = llist_del_all(&priv->free_list))) {
+
+ mutex_lock(&dev->struct_mutex);
+
+ llist_for_each_entry_safe(msm_obj, next,
+ freed, freed)
+ free_object(msm_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (need_resched())
+ break;
+ }
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle)
+ uint32_t size, uint32_t flags, uint32_t *handle,
+ char *name)
{
struct drm_gem_object *obj;
int ret;
@@ -841,6 +964,9 @@
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (name)
+ msm_gem_object_set_name(obj, "%s", name);
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -851,7 +977,6 @@
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct reservation_object *resv,
struct drm_gem_object **obj,
bool struct_mutex_locked)
{
@@ -864,7 +989,7 @@
case MSM_BO_WC:
break;
default:
- dev_err(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
@@ -878,13 +1003,6 @@
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv) {
- msm_obj->resv = resv;
- } else {
- msm_obj->resv = &msm_obj->_resv;
- reservation_object_init(msm_obj->resv);
- }
-
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -912,9 +1030,9 @@
size = PAGE_ALIGN(size);
- if (!iommu_present(&platform_bus_type))
+ if (!msm_use_mmu(dev))
use_vram = true;
- else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size))
@@ -926,7 +1044,7 @@
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+ ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
if (ret)
goto fail;
@@ -959,6 +1077,13 @@
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See the comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
return obj;
@@ -989,14 +1114,14 @@
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
- if (!iommu_present(&platform_bus_type)) {
- dev_err(dev->dev, "cannot import without IOMMU\n");
+ if (!msm_use_mmu(dev)) {
+ DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
if (ret)
goto fail;
@@ -1040,24 +1165,30 @@
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_iova(obj, aspace, iova);
- if (ret) {
- drm_gem_object_put(obj);
- return ERR_PTR(ret);
- }
+ ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ if (ret)
+ goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
- drm_gem_object_put(obj);
- return ERR_CAST(vaddr);
+ msm_gem_unpin_iova(obj, aspace);
+ ret = PTR_ERR(vaddr);
+ goto err;
}
if (bo)
*bo = obj;
return vaddr;
+err:
+ if (locked)
+ drm_gem_object_put(obj);
+ else
+ drm_gem_object_put_unlocked(obj);
+
+ return ERR_PTR(ret);
+
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1204,31 @@
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked)
+{
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ msm_gem_put_vaddr(bo);
+ msm_gem_unpin_iova(bo, aspace);
+
+ if (locked)
+ drm_gem_object_put(bo);
+ else
+ drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3..9e0953c 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -1,25 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
#include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
@@ -41,6 +30,8 @@
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
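+ /* mapped: MMU mapping is in place; inuse: pin count for that mapping */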
+ bool mapped;
+ int inuse;
};
struct msm_gem_object {
@@ -82,15 +73,15 @@
struct list_head vmas; /* list of msm_gem_vma */
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
+ struct llist_node freed;
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
+
+ char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -129,6 +120,7 @@
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_free_work(struct work_struct *work);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -138,6 +130,7 @@
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
+ struct msm_gem_address_space *aspace;
struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
@@ -150,6 +143,7 @@
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
+ u32 ident; /* An identifier for the submit, used for logging */
struct {
uint32_t type;
uint32_t size; /* in dwords */
@@ -158,7 +152,10 @@
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
- struct msm_gem_object *obj;
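+ /* handle is stored during lookup, then replaced by the resolved obj pointer */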
+ union {
+ struct msm_gem_object *obj;
+ uint32_t handle;
+ };
uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6..d7c8948 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -1,25 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-buf.h>
+
+#include <drm/drm_prime.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
-#include <linux/dma-buf.h>
-
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -70,10 +61,3 @@
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- return msm_obj->resv;
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index b72d8e6..722d616 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c3ae750..be5327a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -1,25 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/file.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
/*
* Cmdstream submission:
@@ -31,12 +25,13 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
- uint32_t nr_bos, uint32_t nr_cmds)
+ struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
+ struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
+ uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
- ((u64)nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = struct_size(submit, bos, nr_bos) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -46,9 +41,9 @@
return NULL;
submit->dev = dev;
+ submit->aspace = aspace;
submit->gpu = gpu;
submit->fence = NULL;
- submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
@@ -74,27 +69,14 @@
kfree(submit);
}
-static inline unsigned long __must_check
-copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user_inatomic(to, from, n);
- return -EFAULT;
-}
-
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
- spin_lock(&file->table_lock);
- pagefault_disable();
-
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
@@ -103,34 +85,41 @@
*/
submit->bos[i].flags = 0;
- if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
- pagefault_enable();
- spin_unlock(&file->table_lock);
- if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
- ret = -EFAULT;
- goto out;
- }
- spin_lock(&file->table_lock);
- pagefault_disable();
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
+ i = 0;
+ goto out;
}
+/* at least one of the READ or WRITE flags should be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
- !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+ !(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
- goto out_unlock;
+ i = 0;
+ goto out;
}
+ submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, submit_bo.handle);
+ obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -139,12 +128,12 @@
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
- submit_bo.handle, i);
+ submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = msm_obj;
@@ -152,7 +141,6 @@
}
out_unlock:
- pagefault_enable();
spin_unlock(&file->table_lock);
out:
@@ -167,10 +155,10 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->resv->lock);
+ ww_mutex_unlock(&msm_obj->base.resv->lock);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -193,7 +181,7 @@
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (ret)
goto fail;
@@ -215,7 +203,7 @@
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -241,7 +229,8 @@
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv);
+ ret = dma_resv_reserve_shared(msm_obj->base.resv,
+ 1);
if (ret)
return ret;
}
@@ -269,8 +258,8 @@
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
- submit->gpu->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(&msm_obj->base,
+ submit->aspace, &iova);
if (ret)
break;
@@ -317,6 +306,9 @@
uint32_t *ptr;
int ret = 0;
+ if (!nr_relocs)
+ return 0;
+
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
@@ -396,7 +388,7 @@
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
ww_acquire_fini(&submit->ticket);
@@ -405,6 +397,7 @@
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
@@ -414,9 +407,9 @@
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
+ struct pid *pid = get_pid(task_pid(current));
unsigned i;
- int ret;
-
+ int ret, submitid;
if (!gpu)
return -ENXIO;
@@ -439,7 +432,12 @@
if (!queue)
return -ENOENT;
+ /* Get a unique identifier for the submission for logging purposes */
+ submitid = atomic_inc_return(&ident) - 1;
+
ring = gpu->rb[queue->prio];
+ trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+ args->nr_bos, args->nr_cmds);
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
struct dma_fence *in_fence;
@@ -474,12 +472,16 @@
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
+ args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;
}
+ submit->pid = pid;
+ submit->ident = submitid;
+
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec22..1af5354 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
@@ -38,20 +27,73 @@
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
{
- if (!aspace || !vma->iova)
+ unsigned size = vma->node.size << PAGE_SHIFT;
+
+ /* Print a message if we try to purge a vma in use */
+ if (WARN_ON(vma->inuse > 0))
return;
- if (aspace->mmu) {
- unsigned size = vma->node.size << PAGE_SHIFT;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
- }
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
+ return;
+
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+ vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (!WARN_ON(!vma->iova))
+ vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int npages)
+{
+ unsigned size = npages << PAGE_SHIFT;
+ int ret = 0;
+
+ if (WARN_ON(!vma->iova))
+ return -EINVAL;
+
+ /* Increase the usage counter */
+ vma->inuse++;
+
+ if (vma->mapped)
+ return 0;
+
+ vma->mapped = true;
+
+ if (aspace && aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, prot);
+
+ if (ret)
+ vma->mapped = false;
+
+ return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ return;
spin_lock(&aspace->lock);
- drm_mm_remove_node(&vma->node);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
@@ -59,18 +101,16 @@
msm_gem_address_space_put(aspace);
}
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages)
{
int ret;
- spin_lock(&aspace->lock);
- if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
- spin_unlock(&aspace->lock);
- return 0;
- }
+ if (WARN_ON(vma->iova))
+ return -EBUSY;
+ spin_lock(&aspace->lock);
ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
spin_unlock(&aspace->lock);
@@ -78,19 +118,14 @@
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
+ vma->mapped = false;
- if (aspace->mmu) {
- unsigned size = npages << PAGE_SHIFT;
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, IOMMU_READ | IOMMU_WRITE);
- }
-
- /* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
- return ret;
+ return 0;
}
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
@@ -114,3 +149,26 @@
return aspace;
}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end)
+{
+ struct msm_gem_address_space *aspace;
+ u64 size = va_end - va_start;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = msm_gpummu_new(dev, gpu);
+
+ drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+ size >> PAGE_SHIFT);
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 52a2146..a052364 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -1,24 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
@@ -41,7 +32,11 @@
if (IS_ERR(opp))
return PTR_ERR(opp);
- clk_set_rate(gpu->core_clk, *freq);
+ if (gpu->funcs->gpu_set_freq)
+ gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+ else
+ clk_set_rate(gpu->core_clk, *freq);
+
dev_pm_opp_put(opp);
return 0;
@@ -51,16 +46,14 @@
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- u64 cycles;
- u32 freq = ((u32) status->current_frequency) / 1000000;
ktime_t time;
- status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
- gpu->funcs->gpu_busy(gpu, &cycles);
+ if (gpu->funcs->gpu_get_freq)
+ status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
+ else
+ status->current_frequency = clk_get_rate(gpu->core_clk);
- status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
-
- gpu->devfreq.busy_cycles = cycles;
+ status->busy_time = gpu->funcs->gpu_busy(gpu);
time = ktime_get();
status->total_time = ktime_us_delta(time, gpu->devfreq.time);
@@ -73,7 +66,10 @@
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+ if (gpu->funcs->gpu_get_freq)
+ *freq = gpu->funcs->gpu_get_freq(gpu);
+ else
+ *freq = clk_get_rate(gpu->core_clk);
return 0;
}
@@ -88,7 +84,7 @@
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy || !gpu->core_clk)
+ if (!gpu->funcs->gpu_busy)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -99,12 +95,15 @@
*/
gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
- &msm_devfreq_profile, "simple_ondemand", NULL);
+ &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
- dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
+
+ devfreq_suspend_device(gpu->devfreq.devfreq);
}
static int enable_pwrrail(struct msm_gpu *gpu)
@@ -115,7 +114,7 @@
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
return ret;
}
}
@@ -123,7 +122,7 @@
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
return ret;
}
}
@@ -184,6 +183,14 @@
return 0;
}
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
+{
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@@ -202,12 +209,7 @@
if (ret)
return ret;
- if (gpu->devfreq.devfreq) {
- gpu->devfreq.busy_cycles = 0;
- gpu->devfreq.time = ktime_get();
-
- devfreq_resume_device(gpu->devfreq.devfreq);
- }
+ msm_gpu_resume_devfreq(gpu);
gpu->needs_hw_init = true;
@@ -220,8 +222,7 @@
DBG("%s", gpu->name);
- if (gpu->devfreq.devfreq)
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
ret = disable_axi(gpu);
if (ret)
@@ -306,28 +307,28 @@
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
-
state_bo->size = obj->base.size;
state_bo->iova = iova;
- /* Only store the data for buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ)) {
+ /* Only store data for non-imported buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
void *ptr;
state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
if (!state_bo->data)
- return;
+ goto out;
ptr = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
- return;
+ state_bo->data = NULL;
+ goto out;
}
memcpy(state_bo->data, ptr, obj->base.size);
msm_gem_put_vaddr(&obj->base);
}
-
+out:
state->nr_bos++;
}
@@ -336,6 +337,10 @@
{
struct msm_gpu_state *state;
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
@@ -351,12 +356,15 @@
if (submit) {
int i;
- state->bos = kcalloc(submit->nr_bos,
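+ /* only capture the cmdstream buffers referenced by this submit */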
+ state->bos = kcalloc(submit->nr_cmds,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; state->bos && i < submit->nr_bos; i++)
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
+ for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+ int idx = submit->cmd[i].idx;
+
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
/* Set the active crash state to be dumped on failure */
@@ -419,34 +427,25 @@
mutex_lock(&dev->struct_mutex);
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
struct task_struct *task;
+ /* Increment the fault counts */
+ gpu->global_faults++;
+ submit->queue->faults++;
+
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
-
- /*
- * So slightly annoying, in other paths like
- * mmap'ing gem buffers, mmap_sem is acquired
- * before struct_mutex, which means we can't
- * hold struct_mutex across the call to
- * get_cmdline(). But submits are retired
- * from the same in-order workqueue, so we can
- * safely drop the lock here without worrying
- * about the submit going away.
- */
- mutex_unlock(&dev->struct_mutex);
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
put_task_struct(task);
- mutex_lock(&dev->struct_mutex);
}
if (comm && cmd) {
- dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
@@ -529,11 +528,11 @@
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
- dev_err(dev->dev, "%s: completed fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
- dev_err(dev->dev, "%s: submitted fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno);
queue_work(priv->wq, &gpu->recover_work);
@@ -649,15 +648,33 @@
* Cmdstream submission/retirement:
*/
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_submit *submit)
{
+ int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ volatile struct msm_gpu_submit_stats *stats;
+ u64 elapsed, clock = 0;
int i;
+ stats = &ring->memptrs->stats[index];
+ /* Convert 19.2 MHz always-on counter ticks to nanoseconds of elapsed time */
+ elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+ do_div(elapsed, 192);
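+ /* i.e. ns = ticks * 10000 / 192 == ticks * (1,000,000,000 / 19,200,000) */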
+
+ /* Calculate the clock frequency from the number of CP cycles */
+ if (elapsed) {
+ clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ do_div(clock, elapsed);
+ }
+
+ trace_msm_gpu_submit_retired(submit, elapsed, clock,
+ stats->alwayson_start, stats->alwayson_end);
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
drm_gem_object_put(&msm_obj->base);
}
@@ -680,7 +697,7 @@
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
if (dma_fence_is_signaled(submit->fence))
- retire_submit(gpu, submit);
+ retire_submit(gpu, ring, submit);
}
}
}
@@ -741,8 +758,7 @@
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_get(&msm_obj->base);
- msm_gem_get_iova(&msm_obj->base,
- submit->gpu->aspace, &iova);
+ msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
@@ -768,7 +784,7 @@
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
+ int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
if (ret < 1) {
gpu->nr_clocks = 0;
@@ -790,7 +806,6 @@
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
uint64_t va_start, uint64_t va_end)
{
- struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
int ret;
@@ -799,20 +814,27 @@
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
+ if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
- dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+ DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace))
+ iommu_domain_free(iommu);
+ } else {
+ aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+ va_start, va_end);
+ }
+
if (IS_ERR(aspace)) {
- dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
PTR_ERR(aspace));
- iommu_domain_free(iommu);
return ERR_CAST(aspace);
}
@@ -858,17 +880,17 @@
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+ gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
ret = gpu->irq;
- dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
- dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
@@ -901,22 +923,25 @@
config->va_start, config->va_end);
if (gpu->aspace == NULL)
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;
}
- memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ memptrs = msm_gem_kernel_new(drm,
+ sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
@@ -929,7 +954,7 @@
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
- dev_err(drm->dev,
+ DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
@@ -948,11 +973,7 @@
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -971,11 +992,7 @@
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9122ee6..ab8f0f9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -1,24 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
@@ -31,7 +21,6 @@
struct msm_gpu_config {
const char *ioname;
- const char *irqname;
uint64_t va_start;
uint64_t va_end;
unsigned int nr_rings;
@@ -63,16 +52,18 @@
struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
- int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
+ unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
+ void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
};
struct msm_gpu {
@@ -102,6 +93,9 @@
/* does gpu need hw_init? */
bool needs_hw_init;
+ /* number of GPU hangs (for all contexts) */
+ int global_faults;
+
/* worker for handling active-list retiring: */
struct work_struct retire_work;
@@ -117,6 +111,8 @@
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ struct icc_path *icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
@@ -185,6 +181,7 @@
u64 iova;
size_t size;
void *data;
+ bool encoded;
};
struct msm_gpu_state {
@@ -199,6 +196,7 @@
u32 wptr;
void *data;
int data_size;
+ bool encoded;
} ring[MSM_GPU_MAX_RINGS];
int nr_registers;
@@ -264,6 +262,7 @@
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 0000000..122b847
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm_gpu
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+ TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+ TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, nr_cmds)
+ __field(u32, nr_bos)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->id = id;
+ __entry->ringid = ringid;
+ __entry->nr_bos = nr_bos;
+ __entry->nr_cmds = nr_cmds;
+ ),
+ TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+ __entry->id, __entry->pid, __entry->ringid,
+ __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+ TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+ TP_ARGS(submit, ticks),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->ticks = ticks;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_submit_retired,
+ TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+ u64 start, u64 end),
+ TP_ARGS(submit, elapsed, clock, start, end),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, elapsed)
+ __field(u64, clock)
+ __field(u64, start_ticks)
+ __field(u64, end_ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->elapsed = elapsed;
+ __entry->clock = clock;
+ __entry->start_ticks = start;
+ __entry->end_ticks = end;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->elapsed, __entry->clock,
+ __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 0000000..72c074f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 0000000..34f643a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/dma-mapping.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+ struct msm_mmu base;
+ struct msm_gpu *gpu;
+ dma_addr_t pt_base;
+ uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
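+/* one 32-bit page table entry per 4K page across the GPUMMU VA range */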
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+ return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned prot_bits = 0;
+ unsigned i, j;
+
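+ /* encode IOMMU_WRITE and IOMMU_READ in the low bits of each page table entry */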
+ if (prot & IOMMU_WRITE)
+ prot_bits |= 1;
+ if (prot & IOMMU_READ)
+ prot_bits |= 2;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ dma_addr_t addr = sg->dma_address;
+ for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+ gpummu->table[idx] = addr | prot_bits;
+ addr += GPUMMU_PAGE_SIZE;
+ }
+ }
+
+ /* this could be improved by deferring the flush across multiple map() calls */
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ unsigned i;
+
+ for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+ gpummu->table[idx] = 0;
+
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+ dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+
+ kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_gpummu_attach,
+ .detach = msm_gpummu_detach,
+ .map = msm_gpummu_map,
+ .unmap = msm_gpummu_unmap,
+ .destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+ struct msm_gpummu *gpummu;
+
+ gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+ if (!gpummu)
+ return ERR_PTR(-ENOMEM);
+
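+ /* the extra 32 bytes past the table back the tran_error address from msm_gpummu_params() */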
+ gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+ GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!gpummu->table) {
+ kfree(gpummu);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gpummu->gpu = gpu;
+ msm_mmu_init(&gpummu->base, dev, &funcs);
+
+ return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error)
+{
+ dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+ *pt_base = base;
+ *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 2a90aa4..8c95c31 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
@@ -30,7 +19,7 @@
struct msm_iommu *iommu = arg;
if (iommu->base.handler)
return iommu->base.handler(iommu->base.arg, iova, flags);
- pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+ pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
return 0;
}
@@ -38,13 +27,8 @@
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int ret;
- pm_runtime_get_sync(mmu->dev);
- ret = iommu_attach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
-
- return ret;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
@@ -52,9 +36,7 @@
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
@@ -63,22 +45,17 @@
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
-// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-// pm_runtime_put_sync(mmu->dev);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
- pm_runtime_put_sync(mmu->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index fd88ceb..1cbef6b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_KMS_H__
@@ -41,13 +30,76 @@
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- /* modeset, bracketing atomic_commit(): */
+
+ /*
+ * Atomic commit handling:
+ *
+ * Note that in the case of async commits, the funcs which take
+ * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
+ * might not be evenly balanced with ->prepare_commit(), however
+ * each crtc that effected by a ->prepare_commit() (potentially
+ * multiple times) will eventually (at end of vsync period) be
+ * flushed and completed.
+ *
+ * This has some implications about tracking of cleanup state,
+ * for example SMP blocks to release after commit completes. Ie.
+ * cleanup state should also be duplicated in the various
+ * duplicate_state() methods, as the current cleanup state at
+ * ->complete_commit() time may have accumulated cleanup work
+ * from multiple commits.
+ */
+
+ /**
+ * Enable/disable power/clks needed for hw access done in other
+ * commit related methods.
+ *
+ * If mdp4 is migrated to runpm, we could probably drop these
+ * and use runpm directly.
+ */
+ void (*enable_commit)(struct msm_kms *kms);
+ void (*disable_commit)(struct msm_kms *kms);
+
+ /**
+ * If the kms backend supports async commit, it should implement
+ * this method to return the time of the next vsync. This is
+ * used to determine a time slightly before vsync, for the async
+ * commit timer to run and complete an async commit.
+ */
+ ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);
+
+ /**
+ * Prepare for atomic commit. This is called after any previous
+ * (async or otherwise) commit has completed.
+ */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- /* functions to wait for atomic commit completed on each CRTC */
- void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
- struct drm_crtc *crtc);
+
+ /**
+ * Flush an atomic commit. This is called after the hardware
+ * updates have already been pushed down to the affected planes/
+ * crtcs/encoders/connectors.
+ */
+ void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Wait for any in-progress flush to complete on the specified
+ * crtcs. This should not block if there is no in-progress
+ * commit (ie. don't just wait for a vblank), as it will also
+ * be called before ->prepare_commit() to ensure any potential
+ * "async" commit has completed.
+ */
+ void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Clean up after commit is completed. This is called after
+ * ->wait_flush(), to give the backend a chance to do any
+ * post-commit cleanup.
+ */
+ void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /*
+ * Format handling:
+ */
+
/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
@@ -57,6 +109,7 @@
const struct msm_format *msm_fmt,
const struct drm_mode_fb_cmd2 *cmd,
struct drm_gem_object **bos);
+
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -67,9 +120,6 @@
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
- /* pm suspend/resume hooks */
- int (*pm_suspend)(struct device *dev);
- int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -78,20 +128,48 @@
#endif
};
+struct msm_kms;
+
+/*
+ * A per-crtc timer for pending async atomic flushes. Scheduled to expire
+ * shortly before vblank to flush pending async updates.
+ */
+struct msm_pending_timer {
+ struct hrtimer timer;
+ struct work_struct work;
+ struct msm_kms *kms;
+ unsigned crtc_idx;
+};
+
struct msm_kms {
const struct msm_kms_funcs *funcs;
+ struct drm_device *dev;
/* irq number to be passed on to drm_irq_install */
int irq;
/* mapper-id used to request GEM buffer mapped for scanout: */
struct msm_gem_address_space *aspace;
+
+ /*
+ * For async commit, where ->flush_commit() and later happens
+ * from the crtc's pending_timer close to end of the frame:
+ */
+ struct mutex commit_lock;
+ unsigned pending_crtc_mask;
+ struct msm_pending_timer pending_timers[MAX_CRTCS];
};
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
+ unsigned i;
+
+ mutex_init(&kms->commit_lock);
kms->funcs = funcs;
+
+ for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
+ msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
}
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
@@ -112,4 +190,8 @@
int mdp5_mdss_init(struct drm_device *dev);
int dpu_mdss_init(struct drm_device *dev);
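+/* iterate over the crtcs in 'dev' selected by 'crtc_mask' */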
+#define for_each_crtc_mask(dev, crtc, crtc_mask) \
+ drm_for_each_crtc(crtc, dev) \
+ for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
+
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4..871d563 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_MMU_H__
@@ -25,8 +14,7 @@
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -54,4 +42,7 @@
mmu->handler = handler;
}
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 5ab21bd..3a27153 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* For profiling, userspace can:
@@ -26,6 +15,9 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
@@ -205,7 +197,6 @@
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_perf_state *perf;
- struct dentry *ent;
/* only create on first minor: */
if (priv->perf)
@@ -220,19 +211,9 @@
mutex_init(&perf->read_lock);
priv->perf = perf;
- ent = debugfs_create_file("perf", S_IFREG | S_IRUGO,
- minor->debugfs_root, perf, &perf_debugfs_fops);
- if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/perf\n",
- minor->debugfs_root);
- goto fail;
- }
-
+ debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root,
+ perf, &perf_debugfs_fops);
return 0;
-
-fail:
- msm_perf_debugfs_cleanup(priv);
- return -1;
}
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index f7a0ede..c7832a9 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* For debugging crashes, userspace can:
@@ -42,11 +31,14 @@
#ifdef CONFIG_DEBUG_FS
-#include <linux/kfifo.h>
-#include <linux/debugfs.h>
#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
#include <linux/wait.h>
+#include <drm/drm_file.h>
+
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
@@ -115,7 +107,9 @@
char *fptr = &fifo->buf[fifo->head];
int n;
- wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+ if (!rd->open)
+ return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +207,10 @@
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
+
rd->open = false;
+ wake_up_all(&rd->fifo_event);
+
return 0;
}
@@ -239,8 +236,6 @@
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
struct msm_rd_state *rd;
- struct dentry *ent;
- int ret = 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
@@ -253,20 +248,10 @@
init_waitqueue_head(&rd->fifo_event);
- ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
- minor->debugfs_root, rd, &rd_debugfs_fops);
- if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
- minor->debugfs_root, name);
- ret = -ENOMEM;
- goto fail;
- }
+ debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
+ &rd_debugfs_fops);
return rd;
-
-fail:
- rd_cleanup(rd);
- return ERR_PTR(ret);
}
int msm_rd_debugfs_init(struct drm_minor *minor)
@@ -348,6 +333,12 @@
msm_gem_put_vaddr(&obj->base);
}
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
@@ -369,7 +360,7 @@
va_list args;
va_start(args, fmt);
- n = vsnprintf(msg, sizeof(msg), fmt, args);
+ n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
@@ -378,26 +369,27 @@
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
- n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; rd_full && i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0);
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
- if (!rd_full) {
+ if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6f5295b..e397c44 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_ringbuffer.h"
@@ -36,15 +25,18 @@
ring->gpu = gpu;
ring->id = id;
- /* Pass NULL for the iova pointer - we will map it later */
+
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+ MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
@@ -73,10 +65,7 @@
msm_fence_context_free(ring->fctx);
- if (ring->bo) {
- msm_gem_put_iova(ring->bo, ring->gpu->aspace);
- msm_gem_put_vaddr(ring->bo);
- drm_gem_object_put_unlocked(ring->bo);
- }
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
kfree(ring);
}
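
The ringbuffer hunks above map the BO at allocation time (passing &ring->iova instead of NULL), give it a name for debug output, and collapse the open-coded teardown into a single msm_gem_kernel_put() call. A hedged sketch of that allocate/teardown pairing, using only the signatures visible in this diff (the final 'false' is assumed to mean struct_mutex is not held):

static int sketch_alloc_and_free(struct msm_gpu *gpu)
{
	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	/* Allocate, map into the GPU address space and kmap in one call;
	 * iova is filled in here rather than on first use. */
	vaddr = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
				   MSM_BO_WC, gpu->aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* Label used in debug output. */
	msm_gem_object_set_name(bo, "sketch");

	/* ... use vaddr / iova ... */

	/* One helper replaces put_iova + put_vaddr + object_put. */
	msm_gem_kernel_put(bo, gpu->aspace, false);
	return 0;
}
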
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index cffce09..7764373 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __MSM_RINGBUFFER_H__
@@ -23,9 +12,25 @@
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+#define rbmemptr_stats(ring, index, member) \
+ (rbmemptr((ring), stats) + \
+ ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+ offsetof(struct msm_gpu_submit_stats, member))
+
+struct msm_gpu_submit_stats {
+ u64 cpcycles_start;
+ u64 cpcycles_end;
+ u64 alwayson_start;
+ u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
+
+ volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};
struct msm_ringbuffer {
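
rbmemptr_stats() above resolves the GPU-visible address of one field in one per-submit stats slot by pure offset arithmetic. The standalone C program below reproduces that arithmetic with offsetof() against the same layout (the example base address and index are made up for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct msm_gpu_submit_stats {
	uint64_t cpcycles_start, cpcycles_end;
	uint64_t alwayson_start, alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64

struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;
	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};

int main(void)
{
	uint64_t memptrs_iova = 0x1000;  /* example base address */
	unsigned index = 3;              /* example slot */

	/* Same three terms as rbmemptr_stats(): base + offset of stats[],
	 * + whole slots before this one, + field offset within a slot. */
	uint64_t addr = memptrs_iova
		+ offsetof(struct msm_rbmemptrs, stats)
		+ index * sizeof(struct msm_gpu_submit_stats)
		+ offsetof(struct msm_gpu_submit_stats, alwayson_end);

	printf("stats[%u].alwayson_end at 0x%llx\n",
	       index, (unsigned long long)addr);
	return 0;
}
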
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75..001fbf5 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kref.h>
+#include <linux/uaccess.h>
+
#include "msm_gpu.h"
void msm_submitqueue_destroy(struct kref *kref)
@@ -120,6 +113,47 @@
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
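
The new MSM_SUBMITQUEUE_PARAM_FAULTS query above follows the usual two-step length negotiation: a len == 0 call reports the size the kernel expects, and a second call copies back at most min(len, sizeof(queue->faults)) bytes. A hypothetical userspace sketch of that flow (assumes the drm_msm_submitqueue_query uapi and DRM_IOCTL_MSM_SUBMITQUEUE_QUERY from msm_drm.h; the fault counter is assumed to be 32 bits wide, which the probe call confirms at runtime):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>   /* install path may vary, e.g. libdrm/msm_drm.h */

static int query_queue_faults(int drm_fd, uint32_t queue_id, uint32_t *faults)
{
	struct drm_msm_submitqueue_query q;

	/* Probe with len == 0: the kernel fills in the size it expects. */
	memset(&q, 0, sizeof(q));
	q.id = queue_id;
	q.param = MSM_SUBMITQUEUE_PARAM_FAULTS;
	if (ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q))
		return -1;

	/* Second call with a buffer; the kernel clamps the copy to the
	 * smaller of q.len and its own fault-counter size. */
	q.data = (uintptr_t)faults;
	q.len = sizeof(*faults);
	return ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q);
}
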