Update Linux to v5.10.109
Sourced from [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 833e04a..425ab6f 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -14,6 +14,7 @@
source "drivers/soc/renesas/Kconfig"
source "drivers/soc/rockchip/Kconfig"
source "drivers/soc/samsung/Kconfig"
+source "drivers/soc/sifive/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
@@ -21,5 +22,6 @@
source "drivers/soc/versatile/Kconfig"
source "drivers/soc/xilinx/Kconfig"
source "drivers/soc/zte/Kconfig"
+source "drivers/soc/kendryte/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2ec3550..36452be 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -11,7 +11,7 @@
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
-obj-$(CONFIG_ARCH_MXC) += imx/
+obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
@@ -20,6 +20,7 @@
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
+obj-$(CONFIG_SOC_SIFIVE) += sifive/
obj-y += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
@@ -27,3 +28,4 @@
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
obj-$(CONFIG_ARCH_ZX) += zte/
+obj-$(CONFIG_SOC_KENDRYTE) += kendryte/
diff --git a/drivers/soc/actions/owl-sps-helper.c b/drivers/soc/actions/owl-sps-helper.c
index 291a206..e3f3660 100644
--- a/drivers/soc/actions/owl-sps-helper.c
+++ b/drivers/soc/actions/owl-sps-helper.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/soc/actions/owl-sps.h>
#define OWL_SPS_PG_CTL 0x0
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index bc2c912..321c5e2 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -48,6 +48,19 @@
Say yes to expose Amlogic Meson Everything-Else Power Domains as
Generic Power Domains.
+config MESON_SECURE_PM_DOMAINS
+ bool "Amlogic Meson Secure Power Domains driver"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ depends on PM && OF
+ depends on HAVE_ARM_SMCCC
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Support for the power controller on the Amlogic A1/C1 series.
+ Say yes to expose Amlogic Meson Secure Power Domains as Generic
+ Power Domains.
+
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
depends on ARCH_MESON || COMPILE_TEST
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index de79d04..7b8c5d3 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
+obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 3f0261d..5164a4d 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,13 +14,24 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <dt-bindings/power/meson8-power.h>
+#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
+#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/power/meson-sm1-power.h>
/* AO Offsets */
-#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
-#define AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+#define GX_AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+#define GX_AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+
+/*
+ * Meson8/Meson8b/Meson8m2 only expose the power management registers of the
+ * AO-bus as syscon. 0x3a from GX translates to 0x02, 0x3b translates to 0x03
+ * and so on.
+ */
+#define MESON8_AO_RTI_GEN_PWR_SLEEP0 (0x02 << 2)
+#define MESON8_AO_RTI_GEN_PWR_ISO0 (0x03 << 2)
/* HHI Offsets */
@@ -66,18 +77,25 @@
/* TOP Power Domains */
-static struct meson_ee_pwrc_top_domain g12a_pwrc_vpu = {
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0,
+static struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
- .iso_reg = AO_RTI_GEN_PWR_SLEEP0,
+ .iso_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .iso_mask = BIT(9),
+};
+
+static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
+ .sleep_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(8),
+ .iso_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
#define SM1_EE_PD(__bit) \
{ \
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, \
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0, \
.sleep_mask = BIT(__bit), \
- .iso_reg = AO_RTI_GEN_PWR_ISO0, \
+ .iso_reg = GX_AO_RTI_GEN_PWR_ISO0, \
.iso_mask = BIT(__bit), \
}
@@ -117,6 +135,11 @@
{ __reg, BIT(14) }, \
{ __reg, BIT(15) }
+static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
@@ -124,10 +147,26 @@
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_eth[] = {
+static struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
+static struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
{ HHI_MEM_PD_REG0, GENMASK(3, 2) },
};
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
+ { HHI_MEM_PD_REG0, GENMASK(1, 0) },
+};
+
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
@@ -157,6 +196,10 @@
{ HHI_MEM_PD_REG0, GENMASK(25, 18) },
};
+static struct meson_ee_pwrc_mem_domain axg_pwrc_mem_audio[] = {
+ { HHI_MEM_PD_REG0, GENMASK(5, 4) },
+};
+
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
{ HHI_MEM_PD_REG0, GENMASK(5, 4) },
{ HHI_AUDIO_MEM_PD_REG0, GENMASK(1, 0) },
@@ -198,10 +241,43 @@
static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
+static struct meson_ee_pwrc_domain_desc axg_pwrc_domains[] = {
+ [PWRC_AXG_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, axg_pwrc_mem_vpu,
+ pwrc_ee_get_power, 5, 2),
+ [PWRC_AXG_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+ [PWRC_AXG_AUDIO_ID] = MEM_PD("AUDIO", axg_pwrc_mem_audio),
+};
+
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
- [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &g12a_pwrc_vpu, g12a_pwrc_mem_vpu,
+ [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_get_power, 11, 2),
- [PWRC_G12A_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
+ [PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
+ pwrc_ee_get_power, 12, 2),
+ [PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 0, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 11, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
};
static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
@@ -216,7 +292,7 @@
[PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
pwrc_ee_get_power),
[PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
- [PWRC_SM1_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_SM1_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
struct meson_ee_pwrc_domain {
@@ -374,8 +450,8 @@
if (ret)
return ret;
- ret = pm_genpd_init(&dom->base, &pm_domain_always_on_gov,
- false);
+ dom->base.flags = GENPD_FLAG_ALWAYS_ON;
+ ret = pm_genpd_init(&dom->base, NULL, false);
if (ret)
return ret;
} else {
@@ -470,6 +546,26 @@
.domains = g12a_pwrc_domains,
};
+static struct meson_ee_pwrc_domain_data meson_ee_axg_pwrc_data = {
+ .count = ARRAY_SIZE(axg_pwrc_domains),
+ .domains = axg_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
+ .count = ARRAY_SIZE(gxbb_pwrc_domains),
+ .domains = gxbb_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
+ .count = ARRAY_SIZE(meson8_pwrc_domains),
+ .domains = meson8_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
+ .count = ARRAY_SIZE(meson8b_pwrc_domains),
+ .domains = meson8b_pwrc_domains,
+};
+
static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
.count = ARRAY_SIZE(sm1_pwrc_domains),
.domains = sm1_pwrc_domains,
@@ -477,6 +573,26 @@
static const struct of_device_id meson_ee_pwrc_match_table[] = {
{
+ .compatible = "amlogic,meson8-pwrc",
+ .data = &meson_ee_m8_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-pwrc",
+ .data = &meson_ee_axg_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-pwrc",
+ .data = &meson_ee_gxbb_pwrc_data,
+ },
+ {
.compatible = "amlogic,meson-g12a-pwrc",
.data = &meson_ee_g12a_pwrc_data,
},
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 511b685..21b4bc8 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -339,8 +339,8 @@
return ret;
}
- pm_genpd_init(&vpu_pd->genpd, &pm_domain_always_on_gov,
- powered_off);
+ vpu_pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
+ pm_genpd_init(&vpu_pd->genpd, NULL, powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
&vpu_pd->genpd);
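The conversion just above (mirrored in meson-ee-pwrc.c earlier in this patch) replaces the always-on governor argument to pm_genpd_init() with the GENPD_FLAG_ALWAYS_ON flag, which the genpd core enforces directly. A minimal sketch of the pattern, for a hypothetical provider rather than any driver in this patch:

/* With GENPD_FLAG_ALWAYS_ON set, the genpd core itself refuses to power
 * the domain off at runtime, so no governor is needed (NULL). The last
 * argument still tells the core whether the domain starts powered off.
 */
static struct generic_pm_domain example_pd = {
	.name  = "example",
	.flags = GENPD_FLAG_ALWAYS_ON,
};

pm_genpd_init(&example_pd, NULL, false);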
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6d0d04f..6f54bd8 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -40,6 +40,7 @@
{ "G12A", 0x28 },
{ "G12B", 0x29 },
{ "SM1", 0x2b },
+ { "A1", 0x2c },
};
static const struct meson_gx_package_id {
@@ -65,9 +66,13 @@
{ "A113D", 0x25, 0x22, 0xff },
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
- { "S922X", 0x29, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
- { "S905X3", 0x2b, 0x5, 0xf },
+ { "S922X", 0x29, 0x40, 0xf0 },
+ { "S905D3", 0x2b, 0x4, 0xf5 },
+ { "S905X3", 0x2b, 0x5, 0xf5 },
+ { "S905X3", 0x2b, 0x10, 0x3f },
+ { "S905D3", 0x2b, 0x30, 0x3f },
+ { "A113L", 0x2c, 0x0, 0xf8 },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
new file mode 100644
index 0000000..5fb29a4
--- /dev/null
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Amlogic, Inc.
+ * Author: Jianxin Pan <jianxin.pan@amlogic.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/meson-a1-power.h>
+#include <linux/arm-smccc.h>
+#include <linux/firmware/meson/meson_sm.h>
+
+#define PWRC_ON 1
+#define PWRC_OFF 0
+
+struct meson_secure_pwrc_domain {
+ struct generic_pm_domain base;
+ unsigned int index;
+ struct meson_secure_pwrc *pwrc;
+};
+
+struct meson_secure_pwrc {
+ struct meson_secure_pwrc_domain *domains;
+ struct genpd_onecell_data xlate;
+ struct meson_sm_firmware *fw;
+};
+
+struct meson_secure_pwrc_domain_desc {
+ unsigned int index;
+ unsigned int flags;
+ char *name;
+ bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
+};
+
+struct meson_secure_pwrc_domain_data {
+ unsigned int count;
+ struct meson_secure_pwrc_domain_desc *domains;
+};
+
+static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
+{
+ int is_off = 1;
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
+ pwrc_domain->index, 0, 0, 0, 0) < 0)
+ pr_err("failed to get power domain status\n");
+
+ return is_off;
+}
+
+static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain off\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
+{
+ int ret = 0;
+ struct meson_secure_pwrc_domain *pwrc_domain =
+ container_of(domain, struct meson_secure_pwrc_domain, base);
+
+ if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
+ pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
+ pr_err("failed to set power domain on\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+#define SEC_PD(__name, __flag) \
+[PWRC_##__name##_ID] = \
+{ \
+ .name = #__name, \
+ .index = PWRC_##__name##_ID, \
+ .is_off = pwrc_secure_is_off, \
+ .flags = __flag, \
+}
+
+static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
+ SEC_PD(DSPA, 0),
+ SEC_PD(DSPB, 0),
+ /* UART should keep working in ATF after suspend and before resume */
+ SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
+ /* DMC is for DDR PHY ana/dig and DMC, and should be always on */
+ SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(I2C, 0),
+ SEC_PD(PSRAM, 0),
+ SEC_PD(ACODEC, 0),
+ SEC_PD(AUDIO, 0),
+ SEC_PD(OTP, 0),
+ SEC_PD(DMA, 0),
+ SEC_PD(SD_EMMC, 0),
+ SEC_PD(RAMA, 0),
+ /* SRAMB is used as ATF runtime memory, and should be always on */
+ SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(IR, 0),
+ SEC_PD(SPICC, 0),
+ SEC_PD(SPIFC, 0),
+ SEC_PD(USB, 0),
+ /* NIC is for the Arm NIC-400 interconnect, and should be always on */
+ SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(PDMIN, 0),
+ SEC_PD(RSA, 0),
+};
+
+static int meson_secure_pwrc_probe(struct platform_device *pdev)
+{
+ int i;
+ struct device_node *sm_np;
+ struct meson_secure_pwrc *pwrc;
+ const struct meson_secure_pwrc_domain_data *match;
+
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
+ if (!sm_np) {
+ dev_err(&pdev->dev, "no secure-monitor node\n");
+ return -ENODEV;
+ }
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ pwrc->fw = meson_sm_get(sm_np);
+ of_node_put(sm_np);
+ if (!pwrc->fw)
+ return -EPROBE_DEFER;
+
+ pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->xlate.domains),
+ GFP_KERNEL);
+ if (!pwrc->xlate.domains)
+ return -ENOMEM;
+
+ pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
+ sizeof(*pwrc->domains), GFP_KERNEL);
+ if (!pwrc->domains)
+ return -ENOMEM;
+
+ pwrc->xlate.num_domains = match->count;
+ platform_set_drvdata(pdev, pwrc);
+
+ for (i = 0 ; i < match->count ; ++i) {
+ struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];
+
+ if (!match->domains[i].index)
+ continue;
+
+ dom->pwrc = pwrc;
+ dom->index = match->domains[i].index;
+ dom->base.name = match->domains[i].name;
+ dom->base.flags = match->domains[i].flags;
+ dom->base.power_on = meson_secure_pwrc_on;
+ dom->base.power_off = meson_secure_pwrc_off;
+
+ pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));
+
+ pwrc->xlate.domains[i] = &dom->base;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
+}
+
+static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
+ .domains = a1_pwrc_domains,
+ .count = ARRAY_SIZE(a1_pwrc_domains),
+};
+
+static const struct of_device_id meson_secure_pwrc_match_table[] = {
+ {
+ .compatible = "amlogic,meson-a1-pwrc",
+ .data = &meson_secure_a1_pwrc_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_secure_pwrc_driver = {
+ .probe = meson_secure_pwrc_probe,
+ .driver = {
+ .name = "meson_secure_pwrc",
+ .of_match_table = meson_secure_pwrc_match_table,
+ },
+};
+builtin_platform_driver(meson_secure_pwrc_driver);
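For reference, a single SEC_PD() entry from the table above expands as follows (a sketch; PWRC_UART_ID is supplied by dt-bindings/power/meson-a1-power.h, included at the top of the file):

/* SEC_PD(UART, GENPD_FLAG_ALWAYS_ON) expands roughly to: */
[PWRC_UART_ID] = {
	.name	= "UART",
	.index	= PWRC_UART_ID,
	.is_off	= pwrc_secure_is_off,
	.flags	= GENPD_FLAG_ALWAYS_ON,
},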
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index 323e177..c95fa30 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -8,7 +8,7 @@
config ASPEED_LPC_CTRL
depends on SOC_ASPEED && REGMAP && MFD_SYSCON
tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
- ---help---
+ help
Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
ioctl()s, the driver also provides a read/write interface to a BMC ram
region where the host LPC read/write region can be buffered.
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 040c7dc..71b555c 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -251,10 +251,9 @@
}
lpc_ctrl->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_ctrl->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(lpc_ctrl->clk);
- }
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
rc = clk_prepare_enable(lpc_ctrl->clk);
if (rc) {
dev_err(dev, "couldn't enable clock\n");
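The dev_err_probe() conversion above is the stock probe-path pattern: the helper stays silent on -EPROBE_DEFER and hands back the error it was given, so the three-line if block collapses into the return statement. A simplified sketch of what the helper does (the real one also records the deferral reason for debugfs):

if (err != -EPROBE_DEFER)
	dev_err(dev, "error %pe: couldn't get clock\n", ERR_PTR(err));
return err;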
diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig
index 0552813..50caf6d 100644
--- a/drivers/soc/atmel/Kconfig
+++ b/drivers/soc/atmel/Kconfig
@@ -5,3 +5,14 @@
default ARCH_AT91
help
Include support for the SoC bus on the Atmel ARM SoCs.
+
+config AT91_SOC_SFR
+ tristate "Special Function Registers support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+ This is a driver for the Special Function Registers available on
+ Atmel SAMA5Dx SoCs, providing access to specific aspects of the
+ integrated memory, bridge implementations, processor, etc.
+
+ This driver can also be built as a module. If so, the module
+ will be called sfr.
diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile
index 7ca355d..d849a89 100644
--- a/drivers/soc/atmel/Makefile
+++ b/drivers/soc/atmel/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_AT91_SOC_ID) += soc.o
+obj-$(CONFIG_AT91_SOC_SFR) += sfr.o
diff --git a/drivers/soc/atmel/sfr.c b/drivers/soc/atmel/sfr.c
new file mode 100644
index 0000000..0525eef
--- /dev/null
+++ b/drivers/soc/atmel/sfr.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sfr.c - driver for special function registers
+ *
+ * Copyright (C) 2019 Bootlin.
+ *
+ */
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/random.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define SFR_SN0 0x4c
+#define SFR_SN_SIZE 8
+
+struct atmel_sfr_priv {
+ struct regmap *regmap;
+};
+
+static int atmel_sfr_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct atmel_sfr_priv *priv = context;
+
+ return regmap_bulk_read(priv->regmap, SFR_SN0 + offset,
+ buf, bytes / 4);
+}
+
+static struct nvmem_config atmel_sfr_nvmem_config = {
+ .name = "atmel-sfr",
+ .read_only = true,
+ .word_size = 4,
+ .stride = 4,
+ .size = SFR_SN_SIZE,
+ .reg_read = atmel_sfr_read,
+};
+
+static int atmel_sfr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct nvmem_device *nvmem;
+ struct atmel_sfr_priv *priv;
+ u8 sn[SFR_SN_SIZE];
+ int ret;
+
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ atmel_sfr_nvmem_config.dev = dev;
+ atmel_sfr_nvmem_config.priv = priv;
+
+ nvmem = devm_nvmem_register(dev, &atmel_sfr_nvmem_config);
+ if (IS_ERR(nvmem)) {
+ dev_err(dev, "error registering nvmem config\n");
+ return PTR_ERR(nvmem);
+ }
+
+ ret = atmel_sfr_read(priv, 0, sn, SFR_SN_SIZE);
+ if (ret == 0)
+ add_device_randomness(sn, SFR_SN_SIZE);
+
+ return ret;
+}
+
+static const struct of_device_id atmel_sfr_dt_ids[] = {
+ {
+ .compatible = "atmel,sama5d2-sfr",
+ }, {
+ .compatible = "atmel,sama5d4-sfr",
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, atmel_sfr_dt_ids);
+
+static struct platform_driver atmel_sfr_driver = {
+ .probe = atmel_sfr_probe,
+ .driver = {
+ .name = "atmel-sfr",
+ .of_match_table = atmel_sfr_dt_ids,
+ },
+};
+module_platform_driver(atmel_sfr_driver);
+
+MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
+MODULE_DESCRIPTION("Atmel SFR SN driver for SAMA5D2/4 SoC family");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 4b41745..5d06ee7 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -66,8 +66,9 @@
AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
- AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH,
- "sam9x60", "sam9x60"),
+#endif
+#ifdef CONFIG_SOC_SAM9X60
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 648e326..24f92a6 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -22,6 +22,15 @@
This enables support for the RPi power domains which can be enabled
or disabled via the RPi firmware.
+config SOC_BCM63XX
+ bool "Broadcom 63xx SoC drivers"
+ depends on BMIPS_GENERIC || COMPILE_TEST
+ help
+ Enables drivers for the Broadcom 63xx series of chips.
+ Drivers can be enabled individually within this menu.
+
+ If unsure, say N.
+
config SOC_BRCMSTB
bool "Broadcom STB SoC drivers"
depends on ARM || ARM64 || BMIPS_GENERIC || COMPILE_TEST
@@ -33,6 +42,7 @@
If unsure, say N.
+source "drivers/soc/bcm/bcm63xx/Kconfig"
source "drivers/soc/bcm/brcmstb/Kconfig"
endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index d92268a..7bc90e0 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
+obj-$(CONFIG_SOC_BCM63XX) += bcm63xx/
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm63xx/Kconfig b/drivers/soc/bcm/bcm63xx/Kconfig
new file mode 100644
index 0000000..16f648a
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if SOC_BCM63XX
+
+config BCM63XX_POWER
+ bool "BCM63xx power domain driver"
+ depends on BMIPS_GENERIC || (COMPILE_TEST && OF)
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the BCM63xx power domain controller on
+ BCM6318, BCM6328, BCM6362 and BCM63268 SoCs.
+
+endif # SOC_BCM63XX
diff --git a/drivers/soc/bcm/bcm63xx/Makefile b/drivers/soc/bcm/bcm63xx/Makefile
new file mode 100644
index 0000000..0710d5e
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_BCM63XX_POWER) += bcm63xx-power.o
diff --git a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
new file mode 100644
index 0000000..515fe18
--- /dev/null
+++ b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BCM63xx Power Domain Controller Driver
+ *
+ * Copyright (C) 2020 Álvaro Fernández Rojas <noltari@gmail.com>
+ */
+
+#include <dt-bindings/soc/bcm6318-pm.h>
+#include <dt-bindings/soc/bcm6328-pm.h>
+#include <dt-bindings/soc/bcm6362-pm.h>
+#include <dt-bindings/soc/bcm63268-pm.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+struct bcm63xx_power_dev {
+ struct generic_pm_domain genpd;
+ struct bcm63xx_power *power;
+ uint32_t mask;
+};
+
+struct bcm63xx_power {
+ void __iomem *base;
+ spinlock_t lock;
+ struct bcm63xx_power_dev *dev;
+ struct genpd_onecell_data genpd_data;
+ struct generic_pm_domain **genpd;
+};
+
+struct bcm63xx_power_data {
+ const char * const name;
+ uint8_t bit;
+ unsigned int flags;
+};
+
+static int bcm63xx_power_get_state(struct bcm63xx_power_dev *pmd, bool *is_on)
+{
+ struct bcm63xx_power *power = pmd->power;
+
+ if (!pmd->mask) {
+ *is_on = false;
+ return -EINVAL;
+ }
+
+ *is_on = !(__raw_readl(power->base) & pmd->mask);
+
+ return 0;
+}
+
+static int bcm63xx_power_set_state(struct bcm63xx_power_dev *pmd, bool on)
+{
+ struct bcm63xx_power *power = pmd->power;
+ unsigned long flags;
+ uint32_t val;
+
+ if (!pmd->mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&power->lock, flags);
+ val = __raw_readl(power->base);
+ if (on)
+ val &= ~pmd->mask;
+ else
+ val |= pmd->mask;
+ __raw_writel(val, power->base);
+ spin_unlock_irqrestore(&power->lock, flags);
+
+ return 0;
+}
+
+static int bcm63xx_power_on(struct generic_pm_domain *genpd)
+{
+ struct bcm63xx_power_dev *pmd = container_of(genpd,
+ struct bcm63xx_power_dev, genpd);
+
+ return bcm63xx_power_set_state(pmd, true);
+}
+
+static int bcm63xx_power_off(struct generic_pm_domain *genpd)
+{
+ struct bcm63xx_power_dev *pmd = container_of(genpd,
+ struct bcm63xx_power_dev, genpd);
+
+ return bcm63xx_power_set_state(pmd, false);
+}
+
+static int bcm63xx_power_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ const struct bcm63xx_power_data *entry, *table;
+ struct bcm63xx_power *power;
+ unsigned int ndom;
+ uint8_t max_bit = 0;
+ int ret;
+
+ power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ power->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(power->base))
+ return PTR_ERR(power->base);
+
+ table = of_device_get_match_data(dev);
+ if (!table)
+ return -EINVAL;
+
+ power->genpd_data.num_domains = 0;
+ ndom = 0;
+ for (entry = table; entry->name; entry++) {
+ max_bit = max(max_bit, entry->bit);
+ ndom++;
+ }
+
+ if (!ndom)
+ return -ENODEV;
+
+ power->genpd_data.num_domains = max_bit + 1;
+
+ power->dev = devm_kcalloc(dev, power->genpd_data.num_domains,
+ sizeof(struct bcm63xx_power_dev),
+ GFP_KERNEL);
+ if (!power->dev)
+ return -ENOMEM;
+
+ power->genpd = devm_kcalloc(dev, power->genpd_data.num_domains,
+ sizeof(struct generic_pm_domain *),
+ GFP_KERNEL);
+ if (!power->genpd)
+ return -ENOMEM;
+
+ power->genpd_data.domains = power->genpd;
+
+ ndom = 0;
+ for (entry = table; entry->name; entry++) {
+ struct bcm63xx_power_dev *pmd = &power->dev[ndom];
+ bool is_on;
+
+ pmd->power = power;
+ pmd->mask = BIT(entry->bit);
+ pmd->genpd.name = entry->name;
+ pmd->genpd.flags = entry->flags;
+
+ ret = bcm63xx_power_get_state(pmd, &is_on);
+ if (ret)
+ dev_warn(dev, "unable to get current state for %s\n",
+ pmd->genpd.name);
+
+ pmd->genpd.power_on = bcm63xx_power_on;
+ pmd->genpd.power_off = bcm63xx_power_off;
+
+ pm_genpd_init(&pmd->genpd, NULL, !is_on);
+ power->genpd[entry->bit] = &pmd->genpd;
+
+ ndom++;
+ }
+
+ spin_lock_init(&power->lock);
+
+ ret = of_genpd_add_provider_onecell(np, &power->genpd_data);
+ if (ret) {
+ dev_err(dev, "failed to register genpd driver: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "registered %u power domains\n", ndom);
+
+ return 0;
+}
+
+static const struct bcm63xx_power_data bcm6318_power_domains[] = {
+ {
+ .name = "pcie",
+ .bit = BCM6318_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "usb",
+ .bit = BCM6318_POWER_DOMAIN_USB,
+ }, {
+ .name = "ephy0",
+ .bit = BCM6318_POWER_DOMAIN_EPHY0,
+ }, {
+ .name = "ephy1",
+ .bit = BCM6318_POWER_DOMAIN_EPHY1,
+ }, {
+ .name = "ephy2",
+ .bit = BCM6318_POWER_DOMAIN_EPHY2,
+ }, {
+ .name = "ephy3",
+ .bit = BCM6318_POWER_DOMAIN_EPHY3,
+ }, {
+ .name = "ldo2p5",
+ .bit = BCM6318_POWER_DOMAIN_LDO2P5,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "ldo2p9",
+ .bit = BCM6318_POWER_DOMAIN_LDO2P9,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "sw1p0",
+ .bit = BCM6318_POWER_DOMAIN_SW1P0,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "pad",
+ .bit = BCM6318_POWER_DOMAIN_PAD,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm6328_power_domains[] = {
+ {
+ .name = "adsl2-mips",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_MIPS,
+ }, {
+ .name = "adsl2-phy",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_PHY,
+ }, {
+ .name = "adsl2-afe",
+ .bit = BCM6328_POWER_DOMAIN_ADSL2_AFE,
+ }, {
+ .name = "sar",
+ .bit = BCM6328_POWER_DOMAIN_SAR,
+ }, {
+ .name = "pcm",
+ .bit = BCM6328_POWER_DOMAIN_PCM,
+ }, {
+ .name = "usbd",
+ .bit = BCM6328_POWER_DOMAIN_USBD,
+ }, {
+ .name = "usbh",
+ .bit = BCM6328_POWER_DOMAIN_USBH,
+ }, {
+ .name = "pcie",
+ .bit = BCM6328_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "robosw",
+ .bit = BCM6328_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "ephy",
+ .bit = BCM6328_POWER_DOMAIN_EPHY,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm6362_power_domains[] = {
+ {
+ .name = "sar",
+ .bit = BCM6362_POWER_DOMAIN_SAR,
+ }, {
+ .name = "ipsec",
+ .bit = BCM6362_POWER_DOMAIN_IPSEC,
+ }, {
+ .name = "mips",
+ .bit = BCM6362_POWER_DOMAIN_MIPS,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "dect",
+ .bit = BCM6362_POWER_DOMAIN_DECT,
+ }, {
+ .name = "usbh",
+ .bit = BCM6362_POWER_DOMAIN_USBH,
+ }, {
+ .name = "usbd",
+ .bit = BCM6362_POWER_DOMAIN_USBD,
+ }, {
+ .name = "robosw",
+ .bit = BCM6362_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM6362_POWER_DOMAIN_PCM,
+ }, {
+ .name = "periph",
+ .bit = BCM6362_POWER_DOMAIN_PERIPH,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "adsl-phy",
+ .bit = BCM6362_POWER_DOMAIN_ADSL_PHY,
+ }, {
+ .name = "gmii-pads",
+ .bit = BCM6362_POWER_DOMAIN_GMII_PADS,
+ }, {
+ .name = "fap",
+ .bit = BCM6362_POWER_DOMAIN_FAP,
+ }, {
+ .name = "pcie",
+ .bit = BCM6362_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "wlan-pads",
+ .bit = BCM6362_POWER_DOMAIN_WLAN_PADS,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct bcm63xx_power_data bcm63268_power_domains[] = {
+ {
+ .name = "sar",
+ .bit = BCM63268_POWER_DOMAIN_SAR,
+ }, {
+ .name = "ipsec",
+ .bit = BCM63268_POWER_DOMAIN_IPSEC,
+ }, {
+ .name = "mips",
+ .bit = BCM63268_POWER_DOMAIN_MIPS,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "dect",
+ .bit = BCM63268_POWER_DOMAIN_DECT,
+ }, {
+ .name = "usbh",
+ .bit = BCM63268_POWER_DOMAIN_USBH,
+ }, {
+ .name = "usbd",
+ .bit = BCM63268_POWER_DOMAIN_USBD,
+ }, {
+ .name = "robosw",
+ .bit = BCM63268_POWER_DOMAIN_ROBOSW,
+ }, {
+ .name = "pcm",
+ .bit = BCM63268_POWER_DOMAIN_PCM,
+ }, {
+ .name = "periph",
+ .bit = BCM63268_POWER_DOMAIN_PERIPH,
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ }, {
+ .name = "vdsl-phy",
+ .bit = BCM63268_POWER_DOMAIN_VDSL_PHY,
+ }, {
+ .name = "vdsl-mips",
+ .bit = BCM63268_POWER_DOMAIN_VDSL_MIPS,
+ }, {
+ .name = "fap",
+ .bit = BCM63268_POWER_DOMAIN_FAP,
+ }, {
+ .name = "pcie",
+ .bit = BCM63268_POWER_DOMAIN_PCIE,
+ }, {
+ .name = "wlan-pads",
+ .bit = BCM63268_POWER_DOMAIN_WLAN_PADS,
+ }, {
+ /* sentinel */
+ },
+};
+
+static const struct of_device_id bcm63xx_power_of_match[] = {
+ {
+ .compatible = "brcm,bcm6318-power-controller",
+ .data = &bcm6318_power_domains,
+ }, {
+ .compatible = "brcm,bcm6328-power-controller",
+ .data = &bcm6328_power_domains,
+ }, {
+ .compatible = "brcm,bcm6362-power-controller",
+ .data = &bcm6362_power_domains,
+ }, {
+ .compatible = "brcm,bcm63268-power-controller",
+ .data = &bcm63268_power_domains,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver bcm63xx_power_driver = {
+ .driver = {
+ .name = "bcm63xx-power-controller",
+ .of_match_table = bcm63xx_power_of_match,
+ },
+ .probe = bcm63xx_power_probe,
+};
+builtin_platform_driver(bcm63xx_power_driver);
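The register convention encoded above is active-low gating: a set bit holds the domain powered off. A worked example with a hypothetical register snapshot:

/* With __raw_readl(power->base) == 0x00000005 and a domain whose
 * mask is BIT(2) == 0x4:
 *
 *   bcm63xx_power_get_state():  is_on = !(0x5 & 0x4) = false  (gated off)
 *   bcm63xx_power_on():         val &= ~0x4  ->  0x00000001   (ungated)
 *   bcm63xx_power_off():        val |=  0x4  ->  0x00000005   (gated again)
 */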
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index d326915..7f8dc30 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -13,6 +13,22 @@
#include <linux/syscore_ops.h>
#include <linux/soc/brcmstb/brcmstb.h>
+#define RACENPREF_MASK 0x3
+#define RACPREFINST_SHIFT 0
+#define RACENINST_SHIFT 2
+#define RACPREFDATA_SHIFT 4
+#define RACENDATA_SHIFT 6
+#define RAC_CPU_SHIFT 8
+#define RACCFG_MASK 0xff
+#define DPREF_LINE_2_SHIFT 24
+#define DPREF_LINE_2_MASK 0xff
+
+/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
+#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
+ RACENPREF_MASK << RACENINST_SHIFT | \
+ 1 << RACPREFDATA_SHIFT | \
+ RACENPREF_MASK << RACENDATA_SHIFT)
+
#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
#define CPU_CREDIT_REG_MCPx_READ_CRED_MASK 0xf
#define CPU_CREDIT_REG_MCPx_WRITE_CRED_MASK 0xf
@@ -31,11 +47,21 @@
static bool mcp_wr_pairing_en;
static const int *cpubiuctrl_regs;
+enum cpubiuctrl_regs {
+ CPU_CREDIT_REG = 0,
+ CPU_MCP_FLOW_REG,
+ CPU_WRITEBACK_CTRL_REG,
+ RAC_CONFIG0_REG,
+ RAC_CONFIG1_REG,
+ NUM_CPU_BIUCTRL_REGS,
+};
+
static inline u32 cbc_readl(int reg)
{
int offset = cpubiuctrl_regs[reg];
- if (offset == -1)
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
return (u32)-1;
return readl_relaxed(cpubiuctrl_base + offset);
@@ -45,38 +71,45 @@
{
int offset = cpubiuctrl_regs[reg];
- if (offset == -1)
+ if (offset == -1 ||
+ (IS_ENABLED(CONFIG_CACHE_B15_RAC) && reg >= RAC_CONFIG0_REG))
return;
writel(val, cpubiuctrl_base + offset);
}
-enum cpubiuctrl_regs {
- CPU_CREDIT_REG = 0,
- CPU_MCP_FLOW_REG,
- CPU_WRITEBACK_CTRL_REG
-};
-
static const int b15_cpubiuctrl_regs[] = {
[CPU_CREDIT_REG] = 0x184,
[CPU_MCP_FLOW_REG] = -1,
[CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = -1,
+ [RAC_CONFIG1_REG] = -1,
};
-/* Odd cases, e.g: 7260 */
+/* Odd cases, e.g.: 7260A0 */
static const int b53_cpubiuctrl_no_wb_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
[CPU_WRITEBACK_CTRL_REG] = -1,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
};
static const int b53_cpubiuctrl_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
[CPU_WRITEBACK_CTRL_REG] = 0x22c,
+ [RAC_CONFIG0_REG] = 0x78,
+ [RAC_CONFIG1_REG] = 0x7c,
};
-#define NUM_CPU_BIUCTRL_REGS 3
+static const int a72_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x18,
+ [CPU_MCP_FLOW_REG] = 0x1c,
+ [CPU_WRITEBACK_CTRL_REG] = 0x20,
+ [RAC_CONFIG0_REG] = 0x08,
+ [RAC_CONFIG1_REG] = 0x0c,
+};
static int __init mcp_write_pairing_set(void)
{
@@ -101,25 +134,86 @@
return 0;
}
-static const u32 b53_mach_compat[] = {
+static const u32 a72_b53_mach_compat[] = {
+ 0x7211,
+ 0x7216,
+ 0x72164,
+ 0x72165,
+ 0x7255,
+ 0x7260,
0x7268,
0x7271,
0x7278,
};
-static void __init mcp_b53_set(void)
+/* The read-ahead cache present in the Brahma-B53 CPU is a special piece of
+ * hardware after the integrated L2 cache of the B53 CPU complex whose purpose
+ * is to prefetch instruction and/or data with a line size of either 64 bytes
+ * or 256 bytes. The rationale is that the data-bus of the CPU interface is
+ * optimized for 256-byte transactions, and enabling the read-ahead cache
+ * provides a significant performance boost (typically twice the performance
+ * for a memcpy benchmark application).
+ *
+ * The read-ahead cache is transparent for Virtual Address cache maintenance
+ * operations: IC IVAU, DC IVAC, DC CVAC, DC CVAU and DC CIVAC. So no special
+ * handling is needed for the DMA API above and beyond what is included in the
+ * arm64 implementation.
+ *
+ * In addition, since the Point of Unification is typically between L1 and L2
+ * for the Brahma-B53 processor no special read-ahead cache handling is needed
+ * for the IC IALLU and IC IALLUIS cache maintenance operations.
+ *
+ * However, it is not possible to specify the cache level (L3) for the cache
+ * maintenance instructions operating by set/way to operate on the read-ahead
+ * cache. The read-ahead cache will maintain coherency when inner cache lines
+ * are cleaned by set/way, but if it is necessary to invalidate inner cache
+ * lines by set/way to maintain coherency with system masters operating on
+ * shared memory that does not have hardware support for coherency, then it
+ * will also be necessary to explicitly invalidate the read-ahead cache.
+ */
+static void __init a72_b53_rac_enable_all(struct device_node *np)
+{
+ unsigned int cpu;
+ u32 enable = 0, pref_dist, shift;
+
+ if (IS_ENABLED(CONFIG_CACHE_B15_RAC))
+ return;
+
+ if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ return;
+
+ pref_dist = cbc_readl(RAC_CONFIG1_REG);
+ for_each_possible_cpu(cpu) {
+ shift = cpu * RAC_CPU_SHIFT + RACPREFDATA_SHIFT;
+ enable |= RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT);
+ if (cpubiuctrl_regs == a72_cpubiuctrl_regs) {
+ enable &= ~(RACENPREF_MASK << shift);
+ enable |= 3 << shift;
+ pref_dist |= 1 << (cpu + DPREF_LINE_2_SHIFT);
+ }
+ }
+
+ cbc_writel(enable, RAC_CONFIG0_REG);
+ cbc_writel(pref_dist, RAC_CONFIG1_REG);
+
+ pr_info("%pOF: Broadcom %s read-ahead cache\n",
+ np, cpubiuctrl_regs == a72_cpubiuctrl_regs ?
+ "Cortex-A72" : "Brahma-B53");
+}
+
+static void __init mcp_a72_b53_set(void)
{
unsigned int i;
u32 reg;
reg = brcmstb_get_family_id();
- for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
- if (BRCM_ID(reg) == b53_mach_compat[i])
+ for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == a72_b53_mach_compat[i])
break;
}
- if (i == ARRAY_SIZE(b53_mach_compat))
+ if (i == ARRAY_SIZE(a72_b53_mach_compat))
return;
/* Set all 3 MCP interfaces to 8 credits */
@@ -157,6 +251,7 @@
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
struct device_node *cpu_dn;
+ u32 family_id;
int ret = 0;
cpubiuctrl_base = of_iomap(np, 0);
@@ -179,13 +274,16 @@
cpubiuctrl_regs = b15_cpubiuctrl_regs;
else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
+ cpubiuctrl_regs = a72_cpubiuctrl_regs;
else {
pr_err("unsupported CPU\n");
ret = -EINVAL;
}
of_node_put(cpu_dn);
- if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+ family_id = brcmstb_get_family_id();
+ if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
of_node_put(np);
@@ -248,7 +346,8 @@
return ret;
}
- mcp_b53_set();
+ a72_b53_rac_enable_all(np);
+ mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
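To make the read-ahead cache programming concrete, the arithmetic for a four-CPU system (an illustration, not part of the patch):

/* RAC_DATA_INST_EN_MASK = 1 << 0 | 3 << 2 | 1 << 4 | 3 << 6 = 0xdd,
 * one byte per CPU, so a Brahma-B53 gets RAC_CONFIG0 = 0xdddddddd.
 * On a Cortex-A72, a72_b53_rac_enable_all() widens each CPU's
 * data-prefetch field (bits [5:4] of its byte) from 1 to 3, giving
 * RAC_CONFIG0 = 0xfdfdfdfd, and sets bits 24..27 (DPREF_LINE_2) in
 * RAC_CONFIG1 to select the 256-byte prefetch stride.
 */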
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index f9ad8ad..4df32bc 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -40,4 +40,14 @@
/dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
which can be used to dump the Management Complex and AIOP
firmware logs.
+
+config FSL_RCPM
+ bool "Freescale RCPM support"
+ depends on PM_SLEEP && (ARM || ARM64)
+ help
+ The NXP QorIQ processors based on ARM cores have an RCPM module
+ (Run Control and Power Management), which performs all device-level
+ tasks associated with power management, such as wakeup source control.
+ Note that this driver currently does not support PowerPC-based
+ QorIQ processors.
endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 71dee8d..906f1cd 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
+obj-$(CONFIG_FSL_RCPM) += rcpm.o
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
index 27243f7..5391741 100644
--- a/drivers/soc/fsl/dpaa2-console.c
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -231,6 +231,7 @@
cd->cur_ptr += bytes;
written += bytes;
+ kfree(kbuf);
return written;
err_free_buf:
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 518a8e0..779c319 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <linux/types.h>
@@ -58,8 +58,8 @@
* If cpu == -1, choose the current cpu, with no guarantee against
* being migrated away.
*/
- if (unlikely(cpu < 0))
- cpu = smp_processor_id();
+ if (cpu < 0)
+ cpu = raw_smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
return dpio_by_cpu[cpu];
@@ -70,6 +70,10 @@
if (d)
return d;
+ d = service_select_by_cpu(d, -1);
+ if (d)
+ return d;
+
spin_lock(&dpio_list_lock);
d = list_entry(dpio_list.next, struct dpaa2_io, node);
list_del(&d->node);
@@ -433,6 +437,78 @@
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
/**
+ * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
+ * to a frame queue using one fqid.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptors to be enqueued
+ * @nb: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued (0 if the enqueue ring has no
+ * space), or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
+
+/**
+ * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
+ * to different frame queues using a list of fqids.
+ * @d: the given DPIO service.
+ * @fqid: the given list of frame queue ids.
+ * @fd: the frame descriptors to be enqueued
+ * @nb: number of frames to be enqueued
+ *
+ * Return the number of frames enqueued (0 if the enqueue ring has no
+ * space), or a negative error code: -ENODEV if there is no dpio service,
+ * -ENOMEM if the descriptor array cannot be allocated.
+ */
+int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
+ u32 *fqid,
+ const struct dpaa2_fd *fd,
+ int nb)
+{
+ struct qbman_eq_desc *ed;
+ int i, ret;
+
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ d = service_select(d);
+ if (!d) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < nb; i++) {
+ qbman_eq_desc_clear(&ed[i]);
+ qbman_eq_desc_set_no_orp(&ed[i], 0);
+ qbman_eq_desc_set_fq(&ed[i], fqid[i]);
+ }
+
+ ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
+out:
+ kfree(ed);
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);
+
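+/*
+ * Usage sketch (an illustration, not part of the patch): a caller
+ * batching frames to one FQ would do
+ *
+ *	sent = dpaa2_io_service_enqueue_multiple_fq(NULL, fqid, fds, nb);
+ *
+ * where a NULL service picks the DPIO affine to the current cpu
+ * (falling back to any registered one, per service_select() above)
+ * and sent is the number of frames accepted, 0 when the EQCR is full.
+ */
+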
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
@@ -526,7 +602,7 @@
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
- * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
+ * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@@ -541,7 +617,7 @@
struct dpaa2_io_store *ret;
size_t size;
- if (!max_frames || (max_frames > 16))
+ if (!max_frames || (max_frames > 32))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index c66f5b7..3d69f56 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -1,24 +1,18 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2019 NXP
*
*/
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>
#include "qbman-portal.h"
-#define QMAN_REV_4000 0x04000000
-#define QMAN_REV_4100 0x04010000
-#define QMAN_REV_4101 0x04010001
-#define QMAN_REV_5000 0x05000000
-
-#define QMAN_REV_MASK 0xffff0000
-
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)
@@ -28,6 +22,7 @@
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
@@ -51,6 +46,8 @@
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
@@ -78,6 +75,12 @@
/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN 0xbb
+#define QBMAN_EQCR_DCA_IDXMASK 0x0f
+#define QBMAN_ENQUEUE_FLAG_DCA (1ULL << 31)
+
+#define EQ_DESC_SIZE_WITHOUT_FD 29
+#define EQ_DESC_SIZE_FD_START 32
+
enum qbman_sdqcr_dct {
qbman_sdqcr_dct_null = 0,
qbman_sdqcr_dct_prio_ics,
@@ -90,6 +93,82 @@
qbman_sdqcr_fc_up_to_3 = 1
};
+/* Internal function declarations */
+static int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+static int
+qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+static int qbman_swp_pull_direct(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+static int qbman_swp_pull_mem_back(struct qbman_swp *s,
+ struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
+
+static int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+static int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Function pointers */
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+ = qbman_swp_enqueue_direct;
+
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_direct;
+
+int
+(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+ = qbman_swp_enqueue_multiple_desc_direct;
+
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
+ = qbman_swp_pull_direct;
+
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
+ = qbman_swp_dqrr_next_direct;
+
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+ = qbman_swp_release_direct;
+
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -146,6 +225,15 @@
#define QMAN_RT_MODE 0x00000100
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ else
+ return (2 * ringsize) - (first - last);
+}
+
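+/*
+ * Worked example (an illustration, not part of the patch): the EQCR
+ * producer/consumer indices are kept modulo twice the ring size, so a
+ * full ring is distinguishable from an empty one. With ringsize = 8
+ * the indices run 0..15, and qm_cyc_diff(8, 14, 3) =
+ * (2 * 8) - (14 - 3) = 5 entries consumed across the wrap.
+ */
+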
/**
* qbman_swp_init() - Create a functional object representing the given
* QBMan portal descriptor.
@@ -156,11 +244,16 @@
*/
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
- struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
u32 reg;
+ u32 mask_size;
+ u32 eqcr_pi;
if (!p)
return NULL;
+
+ spin_lock_init(&p->access_spinlock);
+
p->desc = d;
p->mc.valid_bit = QB_VALID_BIT;
p->sdq = 0;
@@ -186,25 +279,38 @@
p->addr_cena = d->cena_bar;
p->addr_cinh = d->cinh_bar;
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
- memset(p->addr_cena, 0, 64 * 1024);
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
- 0, /* EQCR_CI stashing threshold */
- 3, /* RPM: Valid bit mode, RCR in array mode */
- 2, /* DCM: Discrete consumption ack mode */
- 3, /* EPM: Valid bit mode, EQCR in array mode */
- 1, /* mem stashing drop enable == TRUE */
- 1, /* mem stashing priority == TRUE */
- 1, /* mem stashing enable == TRUE */
- 1, /* dequeue stashing priority == TRUE */
- 0, /* dequeue stashing enable == FALSE */
- 0); /* EQCR_CI stashing priority == FALSE */
- if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 2, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
+ } else {
+ memset(p->addr_cena, 0, 64 * 1024);
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 1, /* EQCR_CI stashing threshold */
+ 3, /* RPM: RCR in array mode */
+ 2, /* DCM: Discrete consumption ack */
+ 0, /* EPM: EQCR in ring mode */
+ 1, /* mem stashing drop enable */
+ 1, /* mem stashing priority enable */
+ 1, /* mem stashing enable */
+ 1, /* dequeue stashing priority enable */
+ 0, /* dequeue stashing enable */
+ 0); /* EQCR_CI stashing priority enable */
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
+ }
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -225,6 +331,30 @@
* applied when dequeues from a specific channel are enabled.
*/
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+
+ p->eqcr.pi_ring_size = 8;
+ if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ p->eqcr.pi_ring_size = 32;
+ qbman_swp_enqueue_ptr =
+ qbman_swp_enqueue_mem_back;
+ qbman_swp_enqueue_multiple_ptr =
+ qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_desc_ptr =
+ qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
+ qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
+ qbman_swp_release_ptr = qbman_swp_release_mem_back;
+ }
+
+ for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
+ p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
+ eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
+ & p->eqcr.pi_ci_mask;
+ p->eqcr.available = p->eqcr.pi_ring_size;
+
return p;
}
@@ -378,6 +508,7 @@
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
/**
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
@@ -441,20 +572,9 @@
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
- u8 idx)
-{
- if (idx < 16)
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
- QMAN_RT_MODE);
- else
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
- (idx - 16) * 4,
- QMAN_RT_MODE);
-}
-
+#define QB_RT_BIT ((u32)0x100)
/**
- * qbman_swp_enqueue() - Issue an enqueue command
+ * qbman_swp_enqueue_direct() - Issue an enqueue command
* @s: the software portal used for enqueue
* @d: the enqueue descriptor
* @fd: the frame descriptor to be enqueued
@@ -464,30 +584,345 @@
*
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/
-int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd)
+static
+int qbman_swp_enqueue_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
{
- struct qbman_eq_desc *p;
- u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
- if (!EQAR_SUCCESS(eqar))
- return -EBUSY;
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
- memcpy(&p->dca, &d->dca, 31);
- memcpy(&p->fd, fd, sizeof(*fd));
+/**
+ * qbman_swp_enqueue_mem_back() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static
+int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ int flags = 0;
+ int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /* Set the verb byte, have to substitute in the valid-bit */
- dma_wmb();
- p->verb = d->verb | EQAR_VB(eqar);
- } else {
- p->verb = d->verb | EQAR_VB(eqar);
- dma_wmb();
- qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
+ if (ret >= 0)
+ ret = 0;
+ else
+ ret = -EBUSY;
+ return ret;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: table pointer of frame descriptor table to be enqueued
+ * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of fd to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)d;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ spin_lock(&s->access_spinlock);
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
+
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock(&s->access_spinlock);
+ return 0;
+ }
}
- return 0;
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+ spin_unlock(&s->access_spinlock);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = (uint32_t *)(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&s->access_spinlock, irq_flags);
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = *p & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available) {
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+ return 0;
+ }
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+ spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ /* Flush all the cachelines without load/store in between */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++)
+ eqcr_pi++;
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static
+int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
+ s->eqcr.ci = *p & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ /* Skip copying the verb */
+ memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = (uint32_t *)(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
+ (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
+
+ return num_enqueued;
}
/* Static (push) dequeue */
@@ -645,7 +1080,7 @@
}
/**
- * qbman_swp_pull() - Issue the pull dequeue command
+ * qbman_swp_pull_direct() - Issue the pull dequeue command
* @s: the software portal object
* @d: the software portal descriptor which has been configured with
* the set of qbman_pull_desc_set_*() calls
@@ -653,7 +1088,44 @@
* Return 0 for success, and -EBUSY if the software portal is not ready
* to do pull dequeue.
*/
-int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+static
+int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
+ if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ else
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_pull_mem_back() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static
+int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
struct qbman_pull_desc *p;
@@ -672,17 +1144,11 @@
p->rsp_addr = d->rsp_addr;
p->rsp_addr_virt = d->rsp_addr_virt;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- dma_wmb();
- /* Set the verb byte, have to substitute in the valid-bit */
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- } else {
- p->verb = d->verb | s->vdq.valid_bit;
- s->vdq.valid_bit ^= QB_VALID_BIT;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
- }
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
return 0;
}
@@ -690,14 +1156,14 @@
#define QMAN_DQRR_PI_MASK 0xf
/**
- * qbman_swp_dqrr_next() - Get an valid DQRR entry
+ * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
* @s: the software portal object
*
* Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
* only once, so repeated calls can return a sequence of DQRR entries, without
* requiring they be consumed immediately or in any particular order.
*/
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
@@ -740,10 +1206,99 @@
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/*
@@ -872,7 +1427,7 @@
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
- * qbman_swp_release() - Issue a buffer release command
+ * qbman_swp_release_direct() - Issue a buffer release command
* @s: the software portal object
* @d: the release descriptor
* @buffers: a pointer pointing to the buffer address to be released
@@ -880,8 +1435,9 @@
*
* Return 0 for success, -EBUSY if the release command ring is not ready.
*/
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers)
+int qbman_swp_release_direct(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
{
int i;
struct qbman_release_desc *p;
@@ -895,28 +1451,59 @@
return -EBUSY;
/* Start the release command */
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
- else
- p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+
/* Copy the caller's buffer pointers to the command */
for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
- if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
- /*
- * Set the verb byte, have to substitute in the valid-bit
- * and the number of buffers.
- */
- dma_wmb();
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- } else {
- p->verb = d->verb | RAR_VB(rar) | num_buffers;
- dma_wmb();
- qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
- RAR_IDX(rar) * 4, QMAN_RT_MODE);
- }
+ /*
+ * Set the verb byte, have to substitute in the valid-bit
+ * and the number of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+/**
+ * qbman_swp_release_mem_back() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release_mem_back(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
+
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dma_wmb();
+ qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
+ RAR_IDX(rar) * 4, QMAN_RT_MODE);
return 0;
}
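All of the enqueue paths above share one ring convention: the verb written into each EQCR slot carries the current valid bit, and that bit flips polarity every time the producer index wraps, which is how the hardware tells fresh entries from stale ones. A standalone sketch of just that convention; the QB_VALID_BIT value and the 8-entry ring size are assumptions for illustration, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define QB_VALID_BIT 0x80	/* assumed value, for illustration only */

int main(void)
{
	uint32_t pi = 0, pi_vb = QB_VALID_BIT;
	const uint32_t half_mask = 7;	/* 8-entry ring, illustrative */
	int i;

	for (i = 0; i < 20; i++) {
		/* the verb written to slot (pi & half_mask) ORs in pi_vb */
		printf("entry %d -> slot %u, valid bit 0x%02x\n",
		       i, (unsigned)(pi & half_mask), (unsigned)pi_vb);
		pi++;
		if (!(pi & half_mask))	/* wrapped: flip the polarity */
			pi_vb ^= QB_VALID_BIT;
	}
	return 0;
}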
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index f3ec5d2..c7c2225 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -9,6 +9,13 @@
#include <soc/fsl/dpaa2-fd.h>
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_5000 0x05000000
+
+#define QMAN_REV_MASK 0xffff0000
+
struct dpaa2_dq;
struct qbman_swp;
@@ -81,6 +88,10 @@
u8 wae;
u8 rspid;
__le64 rsp_addr;
+};
+
+struct qbman_eq_desc_with_fd {
+ struct qbman_eq_desc desc;
u8 fd[32];
};
@@ -132,8 +143,48 @@
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
+
+ struct {
+ u32 pi;
+ u32 pi_vb;
+ u32 pi_ring_size;
+ u32 pi_ci_mask;
+ u32 ci;
+ int available;
+ u32 pend;
+ u32 no_pfdr;
+ } eqcr;
+
+ spinlock_t access_spinlock;
};
+/* Function pointers */
+extern
+int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+extern
+int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames);
+extern
+int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames);
+extern
+int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
+extern
+const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
+extern
+int (*qbman_swp_release_ptr)(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers);
+
+/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@@ -158,9 +209,6 @@
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
-int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
-
-const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@@ -172,15 +220,11 @@
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
-int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
- const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
-int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
- const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@@ -194,6 +238,61 @@
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+static inline int
+qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ return qbman_swp_enqueue_ptr(s, d, fd);
+}
+
+/**
+ * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
+ * using one enqueue descriptor
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
+}
+
+/**
+ * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
+ * using multiple enqueue descriptors
+ * @s: the software portal used for enqueue
+ * @d: table of minimal enqueue descriptors
+ * @fd: pointer to the table of frame descriptors to be enqueued
+ * @num_frames: number of frame descriptors to be enqueued
+ *
+ * Return the number of fd enqueued, or a negative error number.
+ */
+static inline int
+qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd,
+ int num_frames)
+{
+ return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
+}
+
+/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
*
@@ -504,4 +603,49 @@
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: pointer to the buffer addresses to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+static inline int qbman_swp_release(struct qbman_swp *s,
+ const struct qbman_release_desc *d,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ return qbman_swp_release_ptr(s, d, buffers, num_buffers);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+static inline int qbman_swp_pull(struct qbman_swp *s,
+ struct qbman_pull_desc *d)
+{
+ return qbman_swp_pull_ptr(s, d);
+}
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ return qbman_swp_dqrr_next_ptr(s);
+}
+
#endif /* __FSL_QBMAN_PORTAL_H */
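With the header changes above, the public enqueue/pull/release entry points become thin inline wrappers over per-portal function pointers, bound once at portal init to either the _direct or the _mem_back variant depending on the QMAN revision. A hedged sketch of what a caller sees; send_frame() and its retry loop are hypothetical, not from the patch:

#include "qbman-portal.h"

/* Hypothetical caller: which variant actually runs is decided once at
 * portal init, not at each call site. */
static int send_frame(struct qbman_swp *swp,
		      const struct qbman_eq_desc *ed,
		      const struct dpaa2_fd *fd)
{
	int err;

	do {
		/* dispatches through qbman_swp_enqueue_ptr */
		err = qbman_swp_enqueue(swp, ed, fd);
	} while (err == -EBUSY);	/* EQCR full: naive retry, illustrative */

	return err;
}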
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 34810f9..091e94c 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -28,7 +28,6 @@
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
-static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -138,7 +137,7 @@
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
@@ -161,8 +160,14 @@
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine)
- soc_dev_attr.machine = machine;
+ if (machine) {
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine) {
+ of_node_put(root);
+ return -ENOMEM;
+ }
+ }
+ of_node_put(root);
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -197,7 +202,6 @@
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
- of_node_put(root);
return 0;
}
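The guts.c hunks fix a string-lifetime bug: soc_dev_attr.machine used to alias the root node's property data, forcing the driver to pin the node reference until remove(). Copying the string with devm_kstrdup() lets the node be released in probe(). A minimal sketch of the pattern, with hypothetical names:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Hypothetical probe helper: duplicate the DT string so it outlives
 * of_node_put(), instead of keeping the node pinned for the device's
 * whole life. */
static int example_read_machine(struct device *dev, const char **out)
{
	struct device_node *root = of_find_node_by_path("/");
	const char *machine = NULL;
	int ret = 0;

	if (of_property_read_string(root, "model", &machine))
		of_property_read_string_index(root, "compatible", 0, &machine);
	if (machine) {
		*out = devm_kstrdup(dev, machine, GFP_KERNEL);
		if (!*out)
			ret = -ENOMEM;
	}
	of_node_put(root);	/* safe: *out no longer aliases node data */
	return ret;
}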
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 95f9e48..feb9747 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -449,11 +449,6 @@
return 0;
}
-static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
-{
- return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
-}
-
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
struct qm_eqcr *eqcr = &portal->eqcr;
@@ -1749,6 +1744,13 @@
}
EXPORT_SYMBOL(qman_get_affine_portal);
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+ return (!device_link_add(dev, p->config->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
return __poll_portal_fast(p, limit);
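qman_start_using_portal() is new here: it records a consumer/supplier device link with DL_FLAG_AUTOREMOVE_CONSUMER, so the portal cannot be unbound under a consumer and the link is dropped automatically when the consumer goes away. A hedged usage sketch; the consumer driver below is hypothetical:

#include <linux/smp.h>
#include <soc/fsl/qman.h>

/* Hypothetical consumer probe step: tie this device's lifetime to its
 * affine portal. Assumes a context where the CPU cannot change. */
static int my_consumer_bind_portal(struct device *dev)
{
	struct qman_portal *p = qman_get_affine_portal(smp_processor_id());

	/* returns -EINVAL if device_link_add() failed */
	return qman_start_using_portal(p, dev);
}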
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 2895d06..7066b2f 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -86,7 +86,7 @@
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
+ be32_add_cpu(&fd->cmd, 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
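be32_add_cpu() is simply the canonical helper for an endian-safe read-modify-write; it behaves exactly like the open-coded line it replaces. A user-space model of the semantics, where htonl/ntohl stand in for cpu_to_be32/be32_to_cpu:

#include <stdint.h>
#include <arpa/inet.h>

/* Model of be32_add_cpu(&fd->cmd, 1): convert to CPU order, add, then
 * convert back to big-endian storage. */
static void be32_add_cpu_model(uint32_t *be_val, uint32_t addend)
{
	*be_val = htonl(ntohl(*be_val) + addend);
}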
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index cfa4b29..357c580 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -5,7 +5,8 @@
config QUICC_ENGINE
bool "QUICC Engine (QE) framework support"
- depends on FSL_SOC && PPC32
+ depends on OF && HAS_IOMEM
+ depends on PPC || ARM || ARM64 || COMPILE_TEST
select GENERIC_ALLOCATOR
select CRC32
help
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index f0c29ed..ed75198 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,13 +41,13 @@
container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
- qe_gc->cpdata = in_be32(&regs->cpdata);
+ qe_gc->cpdata = qe_ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
- qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
- qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
- qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
- qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
- qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+ qe_gc->saved_regs.cpdir1 = qe_ioread32be(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = qe_ioread32be(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = qe_ioread32be(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = qe_ioread32be(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = qe_ioread32be(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -56,7 +56,7 @@
struct qe_pio_regs __iomem *regs = mm_gc->regs;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
- return !!(in_be32(&regs->cpdata) & pin_mask);
+ return !!(qe_ioread32be(&regs->cpdata) & pin_mask);
}
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -74,7 +74,7 @@
else
qe_gc->cpdata &= ~pin_mask;
- out_be32(&regs->cpdata, qe_gc->cpdata);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -101,7 +101,7 @@
}
}
- out_be32(&regs->cpdata, qe_gc->cpdata);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
@@ -160,7 +160,6 @@
{
struct qe_pin *qe_pin;
struct gpio_chip *gc;
- struct of_mm_gpio_chip *mm_gc;
struct qe_gpio_chip *qe_gc;
int err;
unsigned long flags;
@@ -186,7 +185,6 @@
goto err0;
}
- mm_gc = to_of_mm_gpio_chip(gc);
qe_gc = gpiochip_get_data(gc);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -255,11 +253,15 @@
spin_lock_irqsave(&qe_gc->lock, flags);
if (second_reg) {
- clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
- clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+ qe_clrsetbits_be32(&regs->cpdir2, mask2,
+ sregs->cpdir2 & mask2);
+ qe_clrsetbits_be32(&regs->cppar2, mask2,
+ sregs->cppar2 & mask2);
} else {
- clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
- clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+ qe_clrsetbits_be32(&regs->cpdir1, mask2,
+ sregs->cpdir1 & mask2);
+ qe_clrsetbits_be32(&regs->cppar1, mask2,
+ sregs->cppar1 & mask2);
}
if (sregs->cpdata & mask1)
@@ -267,8 +269,8 @@
else
qe_gc->cpdata &= ~mask1;
- out_be32(&regs->cpdata, qe_gc->cpdata);
- clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+ qe_iowrite32be(qe_gc->cpdata, &regs->cpdata);
+ qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 417df7e..2df20d6 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -22,16 +22,12 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/iopoll.h>
#include <linux/crc32.h>
#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <asm/rheap.h>
static void qe_snums_init(void);
static int qe_sdma_init(void);
@@ -108,11 +104,12 @@
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
- u32 ret;
+ u32 val;
+ int ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
- out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
+ qe_iowrite32be((u32)(cmd | QE_CR_FLG), &qe_immr->cp.cecr);
} else {
if (cmd == QE_ASSIGN_PAGE) {
/* Here device is the SNUM, not sub-block */
@@ -129,20 +126,18 @@
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
- out_be32(&qe_immr->cp.cecdr, cmd_input);
- out_be32(&qe_immr->cp.cecr,
- (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
- mcn_protocol << mcn_shift));
+ qe_iowrite32be(cmd_input, &qe_immr->cp.cecdr);
+ qe_iowrite32be((cmd | QE_CR_FLG | ((u32)device << dev_shift) | (u32)mcn_protocol << mcn_shift),
+ &qe_immr->cp.cecr);
}
/* wait for the QE_CR_FLG to clear */
- ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
- 100, 0);
- /* On timeout (e.g. failure), the expression will be false (ret == 0),
- otherwise it will be true (ret == 1). */
+ ret = readx_poll_timeout_atomic(qe_ioread32be, &qe_immr->cp.cecr, val,
+ (val & QE_CR_FLG) == 0, 0, 100);
+ /* On timeout, ret is -ETIMEDOUT, otherwise it will be 0. */
spin_unlock_irqrestore(&qe_lock, flags);
- return ret == 1;
+ return ret == 0;
}
EXPORT_SYMBOL(qe_issue_cmd);
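The hunk above swaps the PPC-only spin_event_timeout(), which returned 1 on success, for readx_poll_timeout_atomic() from <linux/iopoll.h>, which returns 0 on success and -ETIMEDOUT on timeout; hence the inverted return test. A simplified user-space model of the helper's shape, not the in-tree implementation, with an iteration budget standing in for timeout_us:

#include <errno.h>

/* Model only: re-invoke op(addr) into val until cond holds or the
 * budget is spent; 0 on success, -ETIMEDOUT otherwise. */
#define poll_timeout_model(op, addr, val, cond, budget)		\
({								\
	long __n = (budget);					\
	int __ret = -ETIMEDOUT;					\
	while (__n-- > 0) {					\
		(val) = op(addr);				\
		if (cond) {					\
			__ret = 0;				\
			break;					\
		}						\
	}							\
	__ret;							\
})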
@@ -164,8 +159,7 @@
unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ u32 brg;
unsigned int mod;
if (brg_clk)
@@ -175,9 +169,8 @@
if (!qe)
return brg_clk;
- prop = of_get_property(qe, "brg-frequency", &size);
- if (prop && size == sizeof(*prop))
- brg_clk = *prop;
+ if (!of_property_read_u32(qe, "brg-frequency", &brg))
+ brg_clk = brg;
of_node_put(qe);
@@ -197,6 +190,14 @@
#define PVR_VER_836x 0x8083
#define PVR_VER_832x 0x8084
+static bool qe_general4_errata(void)
+{
+#ifdef CONFIG_PPC32
+ return pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x);
+#endif
+ return false;
+}
+
/* Program the BRG to the given sampling rate and multiplier
*
* @brg: the BRG, QE_BRG1 - QE_BRG16
@@ -223,14 +224,14 @@
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
that the BRG divisor must be even if you're not using divide-by-16
mode. */
- if (pvr_version_is(PVR_VER_836x) || pvr_version_is(PVR_VER_832x))
+ if (qe_general4_errata())
if (!div16 && (divisor & 1) && (divisor > 3))
divisor++;
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
QE_BRGC_ENABLE | div16;
- out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+ qe_iowrite32be(tempval, &qe_immr->brg.brgc[brg - QE_BRG1]);
return 0;
}
@@ -364,22 +365,20 @@
static int qe_sdma_init(void)
{
struct sdma __iomem *sdma = &qe_immr->sdma;
- static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
-
- if (!sdma)
- return -ENODEV;
+ static s32 sdma_buf_offset = -ENOMEM;
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
- if (IS_ERR_VALUE(sdma_buf_offset)) {
+ if (sdma_buf_offset < 0) {
sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
- if (IS_ERR_VALUE(sdma_buf_offset))
+ if (sdma_buf_offset < 0)
return -ENOMEM;
}
- out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
- (0x1 << QE_SDMR_CEN_SHIFT)));
+ qe_iowrite32be((u32)sdma_buf_offset & QE_SDEBCR_BA_MASK,
+ &sdma->sdebcr);
+ qe_iowrite32be((QE_SDMR_GLB_1_MSK | (0x1 << QE_SDMR_CEN_SHIFT)),
+ &sdma->sdmr);
return 0;
}
@@ -417,14 +416,14 @@
"uploading microcode '%s'\n", ucode->id);
/* Use auto-increment */
- out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
- QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+ qe_iowrite32be(be32_to_cpu(ucode->iram_offset) | QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR,
+ &qe_immr->iram.iadd);
for (i = 0; i < be32_to_cpu(ucode->count); i++)
- out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+ qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
/* Set I-RAM Ready Register */
- out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+ qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
/*
@@ -449,7 +448,7 @@
unsigned int i;
unsigned int j;
u32 crc;
- size_t calc_size = sizeof(struct qe_firmware);
+ size_t calc_size;
size_t length;
const struct qe_header *hdr;
@@ -481,7 +480,7 @@
}
/* Validate the length and check if there's a CRC */
- calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+ calc_size = struct_size(firmware, microcode, firmware->count);
for (i = 0; i < firmware->count; i++)
/*
@@ -509,7 +508,7 @@
* If the microcode calls for it, split the I-RAM.
*/
if (!firmware->split)
- setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+ qe_setbits_be16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
if (firmware->soc.model)
printk(KERN_INFO
@@ -526,7 +525,7 @@
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
- qe_firmware_info.extended_modes = firmware->extended_modes;
+ qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
@@ -543,11 +542,13 @@
u32 trap = be32_to_cpu(ucode->traps[j]);
if (trap)
- out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+ qe_iowrite32be(trap,
+ &qe_immr->rsp[i].tibcr[j]);
}
/* Enable traps */
- out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
+ qe_iowrite32be(be32_to_cpu(ucode->eccr),
+ &qe_immr->rsp[i].eccr);
}
qe_firmware_uploaded = 1;
@@ -565,11 +566,9 @@
struct qe_firmware_info *qe_get_firmware_info(void)
{
static int initialized;
- struct property *prop;
struct device_node *qe;
struct device_node *fw = NULL;
const char *sprop;
- unsigned int i;
/*
* If we haven't checked yet, and a driver hasn't uploaded a firmware
@@ -603,20 +602,11 @@
strlcpy(qe_firmware_info.id, sprop,
sizeof(qe_firmware_info.id));
- prop = of_find_property(fw, "extended-modes", NULL);
- if (prop && (prop->length == sizeof(u64))) {
- const u64 *iprop = prop->value;
+ of_property_read_u64(fw, "extended-modes",
+ &qe_firmware_info.extended_modes);
- qe_firmware_info.extended_modes = *iprop;
- }
-
- prop = of_find_property(fw, "virtual-traps", NULL);
- if (prop && (prop->length == 32)) {
- const u32 *iprop = prop->value;
-
- for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
- qe_firmware_info.vtraps[i] = iprop[i];
- }
+ of_property_read_u32_array(fw, "virtual-traps", qe_firmware_info.vtraps,
+ ARRAY_SIZE(qe_firmware_info.vtraps));
of_node_put(fw);
@@ -627,17 +617,13 @@
unsigned int qe_get_num_of_risc(void)
{
struct device_node *qe;
- int size;
unsigned int num_of_risc = 0;
- const u32 *prop;
qe = qe_get_device_node();
if (!qe)
return num_of_risc;
- prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
- if (prop && size == sizeof(*prop))
- num_of_risc = *prop;
+ of_property_read_u32(qe, "fsl,qe-num-riscs", &num_of_risc);
of_node_put(qe);
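Most of the qe.c hunks follow one conversion: raw of_get_property() pointer-and-size checks become typed of_property_read_*() accessors, which validate the property length and handle the big-endian conversion internally. A minimal sketch of the resulting pattern; the helper name is illustrative:

#include <linux/of.h>

/* The typed accessor leaves 'val' untouched on any error, so a
 * zero-initialised default falls out naturally. */
static u32 read_prop_or_zero(struct device_node *np, const char *name)
{
	u32 val = 0;

	of_property_read_u32(np, name, &val);	/* 0 on success, -errno otherwise */
	return val;
}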
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 83e85e6..7507559 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -32,7 +32,7 @@
struct muram_block {
struct list_head head;
- unsigned long start;
+ s32 start;
int size;
};
@@ -46,7 +46,7 @@
{
struct device_node *np;
struct resource r;
- u32 zero[OF_MAX_ADDR_CELLS] = {};
+ __be32 zero[OF_MAX_ADDR_CELLS] = {};
resource_size_t max = 0;
int i = 0;
int ret = 0;
@@ -110,34 +110,30 @@
* @algo: algorithm for alloc.
* @data: data for genalloc's algorithm.
*
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
*/
-static unsigned long cpm_muram_alloc_common(unsigned long size,
- genpool_algo_t algo, void *data)
+static s32 cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
{
struct muram_block *entry;
- unsigned long start;
+ s32 start;
- if (!muram_pool && cpm_muram_init())
- goto out2;
-
- start = gen_pool_alloc_algo(muram_pool, size, algo, data);
- if (!start)
- goto out2;
- start = start - GENPOOL_OFFSET;
- memset_io(cpm_muram_addr(start), 0, size);
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
- goto out1;
+ return -ENOMEM;
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
entry->start = start;
entry->size = size;
list_add(&entry->head, &muram_block_list);
return start;
-out1:
- gen_pool_free(muram_pool, start, size);
-out2:
- return (unsigned long)-ENOMEM;
}
/*
@@ -145,13 +141,14 @@
* @size: number of bytes to allocate
* @align: requested alignment, in bytes
*
- * This function returns an offset into the muram area.
+ * This function returns a non-negative offset into the muram area, or
+ * a negative errno on failure.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
- unsigned long start;
+ s32 start;
unsigned long flags;
struct genpool_data_align muram_pool_data;
@@ -168,12 +165,15 @@
* cpm_muram_free - free a chunk of multi-user ram
* @offset: The beginning of the chunk as returned by cpm_muram_alloc().
*/
-int cpm_muram_free(unsigned long offset)
+void cpm_muram_free(s32 offset)
{
unsigned long flags;
int size;
struct muram_block *tmp;
+ if (offset < 0)
+ return;
+
size = 0;
spin_lock_irqsave(&cpm_muram_lock, flags);
list_for_each_entry(tmp, &muram_block_list, head) {
@@ -186,7 +186,6 @@
}
gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
spin_unlock_irqrestore(&cpm_muram_lock, flags);
- return size;
}
EXPORT_SYMBOL(cpm_muram_free);
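cpm_muram_alloc() and cpm_muram_free() now work on a signed s32 offset: negative values are errnos, and cpm_muram_free() ignores them, so error paths need no special-casing. A hedged caller-side sketch; the function below is illustrative:

#include <soc/fsl/qe/qe.h>

/* Illustrative caller under the new s32 contract. */
static int example_use_muram(void)
{
	s32 off = cpm_muram_alloc(512 * 2, 4096);

	if (off < 0)		/* -ENOMEM, from the pool or the tracking kmalloc */
		return off;
	/* ... access the area via cpm_muram_addr(off) ... */
	cpm_muram_free(off);	/* also safe to call with a negative offset */
	return 0;
}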
@@ -194,13 +193,14 @@
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
* @offset: offset of allocation start address
* @size: number of bytes to allocate
- * This function returns an offset into the muram area
+ * This function returns @offset if the area was available, a negative
+ * errno otherwise.
* Use cpm_dpram_addr() to get the virtual address of the area.
* Use cpm_muram_free() to free the allocation.
*/
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
- unsigned long start;
+ s32 start;
unsigned long flags;
struct genpool_data_fixed muram_pool_data_fixed;
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index 9bac546..0390af9 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
@@ -24,9 +25,57 @@
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <soc/fsl/qe/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
-#include "qe_ic.h"
+#define NR_QE_IC_INTS 64
+
+/* QE IC registers offset */
+#define QEIC_CICR 0x00
+#define QEIC_CIVEC 0x04
+#define QEIC_CIPXCC 0x10
+#define QEIC_CIPYCC 0x14
+#define QEIC_CIPWCC 0x18
+#define QEIC_CIPZCC 0x1c
+#define QEIC_CIMR 0x20
+#define QEIC_CRIMR 0x24
+#define QEIC_CIPRTA 0x30
+#define QEIC_CIPRTB 0x34
+#define QEIC_CHIVEC 0x60
+
+struct qe_ic {
+ /* Control registers offset */
+ __be32 __iomem *regs;
+
+ /* The remapper for this QEIC */
+ struct irq_domain *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* VIRQ numbers of QE high/low irqs */
+ unsigned int virq_high;
+ unsigned int virq_low;
+};
+
+/*
+ * QE interrupt controller internal structure
+ */
+struct qe_ic_info {
+ /* Location of this source at the QIMR register */
+ u32 mask;
+
+ /* Mask register offset */
+ u32 mask_reg;
+
+ /*
+ * For grouped interrupt sources - the interrupt code as
+ * appears at the group priority register
+ */
+ u8 pri_code;
+
+ /* Group priority register offset */
+ u32 pri_reg;
+};
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
@@ -171,15 +220,15 @@
},
};
-static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
+static inline u32 qe_ic_read(__be32 __iomem *base, unsigned int reg)
{
- return in_be32(base + (reg >> 2));
+ return qe_ioread32be(base + (reg >> 2));
}
-static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
+static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg,
u32 value)
{
- out_be32(base + (reg >> 2), value);
+ qe_iowrite32be(value, base + (reg >> 2));
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
@@ -281,8 +330,8 @@
.xlate = irq_domain_xlate_onetwocell,
};
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
{
int irq;
@@ -292,13 +341,13 @@
irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
if (irq == 0)
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+/* Return an interrupt vector or 0 if no interrupt is pending. */
+static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
{
int irq;
@@ -308,18 +357,60 @@
irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
if (irq == 0)
- return NO_IRQ;
+ return 0;
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-void __init qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(struct irq_desc *desc),
- void (*high_handler)(struct irq_desc *desc))
+static void qe_ic_cascade_low(struct irq_desc *desc)
{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_high(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
+{
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
+ unsigned int cascade_irq;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ cascade_irq = qe_ic_get_high_irq(qe_ic);
+ if (cascade_irq == 0)
+ cascade_irq = qe_ic_get_low_irq(qe_ic);
+
+ if (cascade_irq != 0)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void __init qe_ic_init(struct device_node *node)
+{
+ void (*low_handler)(struct irq_desc *desc);
+ void (*high_handler)(struct irq_desc *desc);
struct qe_ic *qe_ic;
struct resource res;
- u32 temp = 0, ret, high_active = 0;
+ u32 ret;
ret = of_address_to_resource(node, 0, &res);
if (ret)
@@ -343,166 +434,42 @@
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
qe_ic->virq_low = irq_of_parse_and_map(node, 1);
- if (qe_ic->virq_low == NO_IRQ) {
+ if (!qe_ic->virq_low) {
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
kfree(qe_ic);
return;
}
-
- /* default priority scheme is grouped. If spread mode is */
- /* required, configure cicr accordingly. */
- if (flags & QE_IC_SPREADMODE_GRP_W)
- temp |= CICR_GWCC;
- if (flags & QE_IC_SPREADMODE_GRP_X)
- temp |= CICR_GXCC;
- if (flags & QE_IC_SPREADMODE_GRP_Y)
- temp |= CICR_GYCC;
- if (flags & QE_IC_SPREADMODE_GRP_Z)
- temp |= CICR_GZCC;
- if (flags & QE_IC_SPREADMODE_GRP_RISCA)
- temp |= CICR_GRTA;
- if (flags & QE_IC_SPREADMODE_GRP_RISCB)
- temp |= CICR_GRTB;
-
- /* choose destination signal for highest priority interrupt */
- if (flags & QE_IC_HIGH_SIGNAL) {
- temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
- high_active = 1;
+ if (qe_ic->virq_high != qe_ic->virq_low) {
+ low_handler = qe_ic_cascade_low;
+ high_handler = qe_ic_cascade_high;
+ } else {
+ low_handler = qe_ic_cascade_muxed_mpic;
+ high_handler = NULL;
}
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
+ qe_ic_write(qe_ic->regs, QEIC_CICR, 0);
irq_set_handler_data(qe_ic->virq_low, qe_ic);
irq_set_chained_handler(qe_ic->virq_low, low_handler);
- if (qe_ic->virq_high != NO_IRQ &&
- qe_ic->virq_high != qe_ic->virq_low) {
+ if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) {
irq_set_handler_data(qe_ic->virq_high, qe_ic);
irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
}
-void qe_ic_set_highest_priority(unsigned int virq, int high)
+static int __init qe_ic_of_init(void)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp = 0;
+ struct device_node *np;
- temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
-
- temp &= ~CICR_HP_MASK;
- temp |= src << CICR_HP_SHIFT;
-
- temp &= ~CICR_HPIT_MASK;
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
-
- qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
-}
-
-/* Set Priority level within its group, from 1 to 8 */
-int qe_ic_set_priority(unsigned int virq, unsigned int priority)
-{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp;
-
- if (priority > 8 || priority == 0)
- return -EINVAL;
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
- "%s: Invalid hw irq number for QEIC\n", __func__))
- return -EINVAL;
- if (qe_ic_info[src].pri_reg == 0)
- return -EINVAL;
-
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
-
- if (priority < 4) {
- temp &= ~(0x7 << (32 - priority * 3));
- temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
- } else {
- temp &= ~(0x7 << (24 - priority * 3));
- temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (!np) {
+ np = of_find_node_by_type(NULL, "qeic");
+ if (!np)
+ return -ENODEV;
}
-
- qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
-
+ qe_ic_init(np);
+ of_node_put(np);
return 0;
}
-
-/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
-{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp, control_reg = QEIC_CICNR, shift = 0;
-
- if (priority > 2 || priority == 0)
- return -EINVAL;
- if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
- "%s: Invalid hw irq number for QEIC\n", __func__))
- return -EINVAL;
-
- switch (qe_ic_info[src].pri_reg) {
- case QEIC_CIPZCC:
- shift = CICNR_ZCC1T_SHIFT;
- break;
- case QEIC_CIPWCC:
- shift = CICNR_WCC1T_SHIFT;
- break;
- case QEIC_CIPYCC:
- shift = CICNR_YCC1T_SHIFT;
- break;
- case QEIC_CIPXCC:
- shift = CICNR_XCC1T_SHIFT;
- break;
- case QEIC_CIPRTA:
- shift = CRICR_RTA1T_SHIFT;
- control_reg = QEIC_CRICR;
- break;
- case QEIC_CIPRTB:
- shift = CRICR_RTB1T_SHIFT;
- control_reg = QEIC_CRICR;
- break;
- default:
- return -EINVAL;
- }
-
- shift += (2 - priority) * 2;
- temp = qe_ic_read(qe_ic->regs, control_reg);
- temp &= ~(SIGNAL_MASK << shift);
- temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
- qe_ic_write(qe_ic->regs, control_reg, temp);
-
- return 0;
-}
-
-static struct bus_type qe_ic_subsys = {
- .name = "qe_ic",
- .dev_name = "qe_ic",
-};
-
-static struct device device_qe_ic = {
- .id = 0,
- .bus = &qe_ic_subsys,
-};
-
-static int __init init_qe_ic_sysfs(void)
-{
- int rc;
-
- printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
-
- rc = subsys_system_register(&qe_ic_subsys, NULL);
- if (rc) {
- printk(KERN_ERR "Failed registering qe_ic sys class\n");
- return -ENODEV;
- }
- rc = device_register(&device_qe_ic);
- if (rc) {
- printk(KERN_ERR "Failed registering qe_ic sys device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-subsys_initcall(init_qe_ic_sysfs);
+subsys_initcall(qe_ic_of_init);
diff --git a/drivers/soc/fsl/qe/qe_ic.h b/drivers/soc/fsl/qe/qe_ic.h
deleted file mode 100644
index 08c6956..0000000
--- a/drivers/soc/fsl/qe/qe_ic.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/soc/fsl/qe/qe_ic.h
- *
- * QUICC ENGINE Interrupt Controller Header
- *
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Li Yang <leoli@freescale.com>
- * Based on code from Shlomi Gridish <gridish@freescale.com>
- */
-#ifndef _POWERPC_SYSDEV_QE_IC_H
-#define _POWERPC_SYSDEV_QE_IC_H
-
-#include <soc/fsl/qe/qe_ic.h>
-
-#define NR_QE_IC_INTS 64
-
-/* QE IC registers offset */
-#define QEIC_CICR 0x00
-#define QEIC_CIVEC 0x04
-#define QEIC_CRIPNR 0x08
-#define QEIC_CIPNR 0x0c
-#define QEIC_CIPXCC 0x10
-#define QEIC_CIPYCC 0x14
-#define QEIC_CIPWCC 0x18
-#define QEIC_CIPZCC 0x1c
-#define QEIC_CIMR 0x20
-#define QEIC_CRIMR 0x24
-#define QEIC_CICNR 0x28
-#define QEIC_CIPRTA 0x30
-#define QEIC_CIPRTB 0x34
-#define QEIC_CRICR 0x3c
-#define QEIC_CHIVEC 0x60
-
-/* Interrupt priority registers */
-#define CIPCC_SHIFT_PRI0 29
-#define CIPCC_SHIFT_PRI1 26
-#define CIPCC_SHIFT_PRI2 23
-#define CIPCC_SHIFT_PRI3 20
-#define CIPCC_SHIFT_PRI4 13
-#define CIPCC_SHIFT_PRI5 10
-#define CIPCC_SHIFT_PRI6 7
-#define CIPCC_SHIFT_PRI7 4
-
-/* CICR priority modes */
-#define CICR_GWCC 0x00040000
-#define CICR_GXCC 0x00020000
-#define CICR_GYCC 0x00010000
-#define CICR_GZCC 0x00080000
-#define CICR_GRTA 0x00200000
-#define CICR_GRTB 0x00400000
-#define CICR_HPIT_SHIFT 8
-#define CICR_HPIT_MASK 0x00000300
-#define CICR_HP_SHIFT 24
-#define CICR_HP_MASK 0x3f000000
-
-/* CICNR */
-#define CICNR_WCC1T_SHIFT 20
-#define CICNR_ZCC1T_SHIFT 28
-#define CICNR_YCC1T_SHIFT 12
-#define CICNR_XCC1T_SHIFT 4
-
-/* CRICR */
-#define CRICR_RTA1T_SHIFT 20
-#define CRICR_RTB1T_SHIFT 28
-
-/* Signal indicator */
-#define SIGNAL_MASK 3
-#define SIGNAL_HIGH 2
-#define SIGNAL_LOW 0
-
-struct qe_ic {
- /* Control registers offset */
- volatile u32 __iomem *regs;
-
- /* The remapper for this QEIC */
- struct irq_domain *irqhost;
-
- /* The "linux" controller struct */
- struct irq_chip hc_irq;
-
- /* VIRQ numbers of QE high/low irqs */
- unsigned int virq_high;
- unsigned int virq_low;
-};
-
-/*
- * QE interrupt controller internal structure
- */
-struct qe_ic_info {
- u32 mask; /* location of this source at the QIMR register. */
- u32 mask_reg; /* Mask register offset */
- u8 pri_code; /* for grouped interrupts sources - the interrupt
- code as appears at the group priority register */
- u32 pri_reg; /* Group priority register offset */
-};
-
-#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index 3657e29..1bb46d9 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -18,8 +18,6 @@
#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
-#include <asm/prom.h>
-#include <sysdev/fsl_soc.h>
#undef DEBUG
@@ -30,17 +28,18 @@
{
struct resource res;
int ret;
- const u32 *num_ports;
+ u32 num_ports;
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
- num_ports = of_get_property(np, "num-ports", NULL);
- if (num_ports)
- num_par_io_ports = *num_ports;
+ if (!of_property_read_u32(np, "num-ports", &num_ports))
+ num_par_io_ports = num_ports;
return 0;
}
@@ -57,16 +56,16 @@
pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
- tmp_val = in_be32(&par_io->cpodr);
+ tmp_val = qe_ioread32be(&par_io->cpodr);
if (open_drain)
- out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
+ qe_iowrite32be(pin_mask1bit | tmp_val, &par_io->cpodr);
else
- out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
+ qe_iowrite32be(~pin_mask1bit & tmp_val, &par_io->cpodr);
/* define direction */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cpdir2) :
- in_be32(&par_io->cpdir1);
+ qe_ioread32be(&par_io->cpdir2) :
+ qe_ioread32be(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
@@ -78,34 +77,30 @@
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cpdir2,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir2);
} else {
- out_be32(&par_io->cpdir1,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cpdir1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cpdir1);
}
/* define pin assignment */
tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
- in_be32(&par_io->cppar2) :
- in_be32(&par_io->cppar1);
+ qe_ioread32be(&par_io->cppar2) :
+ qe_ioread32be(&par_io->cppar1);
new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
(pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
if (pin > (QE_PIO_PINS / 2) - 1) {
- out_be32(&par_io->cppar2,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar2);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar2);
} else {
- out_be32(&par_io->cppar1,
- ~pin_mask2bits & tmp_val);
+ qe_iowrite32be(~pin_mask2bits & tmp_val, &par_io->cppar1);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
+ qe_iowrite32be(new_mask2bits | tmp_val, &par_io->cppar1);
}
}
EXPORT_SYMBOL(__par_io_config_pin);
@@ -133,12 +128,12 @@
/* calculate pin location */
pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
- tmp_val = in_be32(&par_io[port].cpdata);
+ tmp_val = qe_ioread32be(&par_io[port].cpdata);
if (val == 0) /* clear */
- out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
+ qe_iowrite32be(~pin_mask & tmp_val, &par_io[port].cpdata);
else /* set */
- out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
+ qe_iowrite32be(pin_mask | tmp_val, &par_io[port].cpdata);
return 0;
}
@@ -147,23 +142,20 @@
int par_io_of_config(struct device_node *np)
{
struct device_node *pio;
- const phandle *ph;
int pio_map_len;
- const unsigned int *pio_map;
+ const __be32 *pio_map;
if (par_io == NULL) {
printk(KERN_ERR "par_io not initialized\n");
return -1;
}
- ph = of_get_property(np, "pio-handle", NULL);
- if (ph == NULL) {
+ pio = of_parse_phandle(np, "pio-handle", 0);
+ if (pio == NULL) {
printk(KERN_ERR "pio-handle not available\n");
return -1;
}
- pio = of_find_node_by_phandle(*ph);
-
pio_map = of_get_property(pio, "pio-map", &pio_map_len);
if (pio_map == NULL) {
printk(KERN_ERR "pio-map is not set!\n");
@@ -176,9 +168,15 @@
}
while (pio_map_len > 0) {
- par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
- (int) pio_map[2], (int) pio_map[3],
- (int) pio_map[4], (int) pio_map[5]);
+ u8 port = be32_to_cpu(pio_map[0]);
+ u8 pin = be32_to_cpu(pio_map[1]);
+ int dir = be32_to_cpu(pio_map[2]);
+ int open_drain = be32_to_cpu(pio_map[3]);
+ int assignment = be32_to_cpu(pio_map[4]);
+ int has_irq = be32_to_cpu(pio_map[5]);
+
+ par_io_config_pin(port, pin, dir, open_drain,
+ assignment, has_irq);
pio_map += 6;
pio_map_len -= 6;
}
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index e37ebc3..7d7d78d 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -169,10 +169,10 @@
&siram[siram_entry_id * 32 + 0x200 + i]);
}
- setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
- SIR_LAST);
- setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
- SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ qe_setbits_be16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
/* Set SIxMR register */
sixmr = SIMR_SAD(siram_entry_id);
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index 024d239..21dbcd7 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -15,7 +15,6 @@
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -35,8 +34,8 @@
return -EINVAL;
spin_lock_irqsave(&cmxgcr_lock, flags);
- clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
- ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ qe_clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
return 0;
@@ -80,8 +79,8 @@
return -EINVAL;
}
- clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
- UCC_GUEMR_SET_RESERVED3 | speed);
+ qe_clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
return 0;
}
@@ -109,9 +108,9 @@
get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
if (set)
- setbits32(cmxucr, mask << shift);
+ qe_setbits_be32(cmxucr, mask << shift);
else
- clrbits32(cmxucr, mask << shift);
+ qe_clrbits_be32(cmxucr, mask << shift);
return 0;
}
@@ -207,8 +206,8 @@
if (mode == COMM_DIR_RX)
shift += 4;
- clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- clock_bits << shift);
+ qe_clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
return 0;
}
@@ -520,11 +519,11 @@
int clock_bits;
u32 shift;
struct qe_mux __iomem *qe_mux_reg;
- __be32 __iomem *cmxs1cr;
+ __be32 __iomem *cmxs1cr;
qe_mux_reg = &qe_immr->qmx;
- if (tdm_num > 7 || tdm_num < 0)
+ if (tdm_num > 7)
return -EINVAL;
/* The communications direction must be RX or TX */
@@ -540,8 +539,8 @@
cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
&qe_mux_reg->cmxsi1cr_h;
- qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- clock_bits << shift);
+ qe_clrsetbits_be32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
return 0;
}
@@ -633,7 +632,7 @@
{
int source;
u32 shift;
- struct qe_mux *qe_mux_reg;
+ struct qe_mux __iomem *qe_mux_reg;
qe_mux_reg = &qe_immr->qmx;
@@ -650,9 +649,9 @@
shift = ucc_get_tdm_sync_shift(mode, tdm_num);
- qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
- QE_CMXUCR_TX_CLK_SRC_MASK << shift,
- source << shift);
+ qe_clrsetbits_be32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
return 0;
}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index af4d80e..ad6193e 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -29,41 +29,42 @@
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+ &uccf->uf_regs->gumr, qe_ioread32be(&uccf->uf_regs->gumr));
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+ &uccf->uf_regs->upsmr, qe_ioread32be(&uccf->uf_regs->upsmr));
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+ &uccf->uf_regs->utodr, qe_ioread16be(&uccf->uf_regs->utodr));
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+ &uccf->uf_regs->udsr, qe_ioread16be(&uccf->uf_regs->udsr));
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+ &uccf->uf_regs->ucce, qe_ioread32be(&uccf->uf_regs->ucce));
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+ &uccf->uf_regs->uccm, qe_ioread32be(&uccf->uf_regs->uccm));
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+ &uccf->uf_regs->uccs, qe_ioread8(&uccf->uf_regs->uccs));
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+ &uccf->uf_regs->urfb, qe_ioread32be(&uccf->uf_regs->urfb));
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+ &uccf->uf_regs->urfs, qe_ioread16be(&uccf->uf_regs->urfs));
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+ &uccf->uf_regs->urfet, qe_ioread16be(&uccf->uf_regs->urfet));
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+ &uccf->uf_regs->urfset,
+ qe_ioread16be(&uccf->uf_regs->urfset));
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+ &uccf->uf_regs->utfb, qe_ioread32be(&uccf->uf_regs->utfb));
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+ &uccf->uf_regs->utfs, qe_ioread16be(&uccf->uf_regs->utfs));
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+ &uccf->uf_regs->utfet, qe_ioread16be(&uccf->uf_regs->utfet));
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+ &uccf->uf_regs->utftt, qe_ioread16be(&uccf->uf_regs->utftt));
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
- &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+ &uccf->uf_regs->utpt, qe_ioread16be(&uccf->uf_regs->utpt));
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
- &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+ &uccf->uf_regs->urtry, qe_ioread32be(&uccf->uf_regs->urtry));
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
- &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
+ &uccf->uf_regs->guemr, qe_ioread8(&uccf->uf_regs->guemr));
}
EXPORT_SYMBOL(ucc_fast_dump_regs);
@@ -85,7 +86,7 @@
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
- out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
+ qe_iowrite16be(UCC_FAST_TOD, &uccf->uf_regs->utodr);
}
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
@@ -97,7 +98,7 @@
uf_regs = uccf->uf_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr = in_be32(&uf_regs->gumr);
+ gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr |= UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 1;
@@ -106,7 +107,7 @@
gumr |= UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 1;
}
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_enable);
@@ -118,7 +119,7 @@
uf_regs = uccf->uf_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr = in_be32(&uf_regs->gumr);
+ gumr = qe_ioread32be(&uf_regs->gumr);
if (mode & COMM_DIR_TX) {
gumr &= ~UCC_FAST_GUMR_ENT;
uccf->enabled_tx = 0;
@@ -127,7 +128,7 @@
gumr &= ~UCC_FAST_GUMR_ENR;
uccf->enabled_rx = 0;
}
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
}
EXPORT_SYMBOL(ucc_fast_disable);
@@ -196,6 +197,8 @@
__func__);
return -ENOMEM;
}
+ uccf->ucc_fast_tx_virtual_fifo_base_offset = -1;
+ uccf->ucc_fast_rx_virtual_fifo_base_offset = -1;
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
@@ -259,15 +262,14 @@
gumr |= uf_info->tenc;
gumr |= uf_info->tcrc;
gumr |= uf_info->mode;
- out_be32(&uf_regs->gumr, gumr);
+ qe_iowrite32be(gumr, &uf_regs->gumr);
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+ if (uccf->ucc_fast_tx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
__func__);
- uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
@@ -277,24 +279,25 @@
qe_muram_alloc(uf_info->urfs +
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+ if (uccf->ucc_fast_rx_virtual_fifo_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
__func__);
- uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
}
/* Set Virtual Fifo registers */
- out_be16(&uf_regs->urfs, uf_info->urfs);
- out_be16(&uf_regs->urfet, uf_info->urfet);
- out_be16(&uf_regs->urfset, uf_info->urfset);
- out_be16(&uf_regs->utfs, uf_info->utfs);
- out_be16(&uf_regs->utfet, uf_info->utfet);
- out_be16(&uf_regs->utftt, uf_info->utftt);
+ qe_iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ qe_iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ qe_iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ qe_iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ qe_iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ qe_iowrite16be(uf_info->utftt, &uf_regs->utftt);
/* utfb, urfb are offsets from MURAM base */
- out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
- out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
+ qe_iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset,
+ &uf_regs->utfb);
+ qe_iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset,
+ &uf_regs->urfb);
/* Mux clocking */
/* Grant Support */
@@ -362,14 +365,14 @@
}
/* Set interrupt mask register at UCC level. */
- out_be32(&uf_regs->uccm, uf_info->uccm_mask);
+ qe_iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- out_be32(&uf_regs->ucce, 0xffffffff);
+ qe_iowrite32be(0xffffffff, &uf_regs->ucce);
*uccf_ret = uccf;
return 0;
@@ -381,11 +384,8 @@
if (!uccf)
return;
- if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
- qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
-
- if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
- qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
+ qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
if (uccf->uf_regs)
iounmap(uccf->uf_regs);
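
These ucc_fast hunks replace the IS_ERR_VALUE() checks with plain signed comparisons and seed both FIFO offsets with -1 before the first allocation, so the free path can run unconditionally. A small userspace model of that convention (the function names and the -ENOMEM value are illustrative, not the QE API):

#include <stdio.h>

typedef long muram_off_t;

/* stand-in for qe_muram_alloc(): negative errno on failure */
static muram_off_t fake_muram_alloc(int fail)
{
	return fail ? -12 : 0x100;
}

/* stand-in for qe_muram_free(): tolerates -1 / never-allocated */
static void fake_muram_free(muram_off_t off)
{
	if (off < 0)
		return;
	printf("freed offset 0x%lx\n", off);
}

int main(void)
{
	muram_off_t tx = -1, rx = -1;	/* seeded before any alloc */

	tx = fake_muram_alloc(0);
	rx = fake_muram_alloc(1);	/* simulate the RX FIFO failing */
	if (rx < 0) {
		/* error path frees both without tracking which
		 * allocation actually succeeded */
		fake_muram_free(tx);
		fake_muram_free(rx);
		return 1;
	}
	return 0;
}
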
diff --git a/drivers/soc/fsl/qe/ucc_slow.c b/drivers/soc/fsl/qe/ucc_slow.c
index 34f0ec3..7e11be4 100644
--- a/drivers/soc/fsl/qe/ucc_slow.c
+++ b/drivers/soc/fsl/qe/ucc_slow.c
@@ -72,13 +72,13 @@
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
/* Enable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l |= UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 1;
@@ -87,19 +87,19 @@
gumr_l |= UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 1;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr_l;
us_regs = uccs->us_regs;
/* Disable reception and/or transmission on this UCC. */
- gumr_l = in_be32(&us_regs->gumr_l);
+ gumr_l = qe_ioread32be(&us_regs->gumr_l);
if (mode & COMM_DIR_TX) {
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
uccs->enabled_tx = 0;
@@ -108,7 +108,7 @@
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
uccs->enabled_rx = 0;
}
- out_be32(&us_regs->gumr_l, gumr_l);
+ qe_iowrite32be(gumr_l, &us_regs->gumr_l);
}
EXPORT_SYMBOL(ucc_slow_disable);
@@ -122,7 +122,7 @@
u32 i;
struct ucc_slow __iomem *us_regs;
u32 gumr;
- struct qe_bd *bd;
+ struct qe_bd __iomem *bd;
u32 id;
u32 command;
int ret = 0;
@@ -154,6 +154,9 @@
__func__);
return -ENOMEM;
}
+ uccs->rx_base_offset = -1;
+ uccs->tx_base_offset = -1;
+ uccs->us_pram_offset = -1;
/* Fill slow UCC structure */
uccs->us_info = us_info;
@@ -165,21 +168,14 @@
return -ENOMEM;
}
- uccs->saved_uccm = 0;
- uccs->p_rx_frame = 0;
us_regs = uccs->us_regs;
- uccs->p_ucce = (u16 *) & (us_regs->ucce);
- uccs->p_uccm = (u16 *) & (us_regs->uccm);
-#ifdef STATISTICS
- uccs->rx_frames = 0;
- uccs->tx_frames = 0;
- uccs->rx_discarded = 0;
-#endif /* STATISTICS */
+ uccs->p_ucce = &us_regs->ucce;
+ uccs->p_uccm = &us_regs->uccm;
/* Get PRAM base */
uccs->us_pram_offset =
qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
- if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+ if (uccs->us_pram_offset < 0) {
printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
ucc_slow_free(uccs);
return -ENOMEM;
@@ -198,7 +194,7 @@
return ret;
}
- out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
+ qe_iowrite16be(us_info->max_rx_buf_length, &uccs->us_pram->mrblr);
INIT_LIST_HEAD(&uccs->confQ);
@@ -206,10 +202,9 @@
uccs->rx_base_offset =
qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+ if (uccs->rx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
us_info->rx_bd_ring_len);
- uccs->rx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -217,9 +212,8 @@
uccs->tx_base_offset =
qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+ if (uccs->tx_base_offset < 0) {
printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
- uccs->tx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
}
@@ -228,27 +222,27 @@
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(0, &bd->buf);
/* set bd status and length */
- out_be32((u32 *) bd, 0);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
bd++;
}
/* for last BD set Wrap bit */
- out_be32(&bd->buf, 0);
- out_be32((u32 *) bd, cpu_to_be32(T_W));
+ qe_iowrite32be(0, &bd->buf);
+ qe_iowrite32be(T_W, (u32 __iomem *)bd);
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
- out_be32((u32*)bd, 0);
+ qe_iowrite32be(0, (u32 __iomem *)bd);
/* clear bd buffer */
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(0, &bd->buf);
bd++;
}
/* for last BD set Wrap bit */
- out_be32((u32*)bd, cpu_to_be32(R_W));
- out_be32(&bd->buf, 0);
+ qe_iowrite32be(R_W, (u32 __iomem *)bd);
+ qe_iowrite32be(0, &bd->buf);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
@@ -269,11 +263,11 @@
gumr |= UCC_SLOW_GUMR_H_TXSY;
if (us_info->rtsm)
gumr |= UCC_SLOW_GUMR_H_RTSM;
- out_be32(&us_regs->gumr_h, gumr);
+ qe_iowrite32be(gumr, &us_regs->gumr_h);
/* gumr_l */
- gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
- us_info->diag | us_info->mode;
+ gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
+ (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
@@ -282,18 +276,18 @@
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
- out_be32(&us_regs->gumr_l, gumr);
+ qe_iowrite32be(gumr, &us_regs->gumr_l);
/* Function code registers */
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
- uccs->us_pram->tbmr = UCC_BMR_BO_BE;
- uccs->us_pram->rbmr = UCC_BMR_BO_BE;
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
+ qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);
/* rbase, tbase are offsets from MURAM base */
- out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
- out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
+ qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);
+ qe_iowrite16be(uccs->tx_base_offset, &uccs->us_pram->tbase);
/* Mux clocking */
/* Grant Support */
@@ -323,14 +317,14 @@
}
/* Set interrupt mask register at UCC level. */
- out_be16(&us_regs->uccm, us_info->uccm_mask);
+ qe_iowrite16be(us_info->uccm_mask, &us_regs->uccm);
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
* as soon as the dam is opened. */
/* Writing '1' clears */
- out_be16(&us_regs->ucce, 0xffff);
+ qe_iowrite16be(0xffff, &us_regs->ucce);
/* Issue QE Init command */
if (us_info->init_tx && us_info->init_rx)
@@ -352,14 +346,9 @@
if (!uccs)
return;
- if (uccs->rx_base_offset)
- qe_muram_free(uccs->rx_base_offset);
-
- if (uccs->tx_base_offset)
- qe_muram_free(uccs->tx_base_offset);
-
- if (uccs->us_pram)
- qe_muram_free(uccs->us_pram_offset);
+ qe_muram_free(uccs->rx_base_offset);
+ qe_muram_free(uccs->tx_base_offset);
+ qe_muram_free(uccs->us_pram_offset);
if (uccs->us_regs)
iounmap(uccs->us_regs);
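
The Tx and Rx loops here zero every descriptor's status/length word and buffer pointer, then tag only the last descriptor with the Wrap bit so the controller cycles back to the ring head. A minimal model of that layout (the struct shape and the T_W bit position are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define T_W (1u << 29)	/* assumed Tx Wrap bit, for illustration only */
#define RING_LEN 8

struct fake_bd {
	uint32_t status_len;	/* status and length share one word */
	uint32_t buf;
};

int main(void)
{
	struct fake_bd ring[RING_LEN];
	int i;

	for (i = 0; i < RING_LEN - 1; i++) {
		ring[i].buf = 0;
		ring[i].status_len = 0;
	}
	/* last BD wraps the ring back to entry 0 */
	ring[RING_LEN - 1].buf = 0;
	ring[RING_LEN - 1].status_len = T_W;

	printf("last BD status 0x%08x\n", ring[RING_LEN - 1].status_len);
	return 0;
}
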
diff --git a/drivers/soc/fsl/qe/usb.c b/drivers/soc/fsl/qe/usb.c
index 32d8269..890f236 100644
--- a/drivers/soc/fsl/qe/usb.c
+++ b/drivers/soc/fsl/qe/usb.c
@@ -43,7 +43,7 @@
spin_lock_irqsave(&cmxgcr_lock, flags);
- clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+ qe_clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
spin_unlock_irqrestore(&cmxgcr_lock, flags);
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
new file mode 100644
index 0000000..a093dbe
--- /dev/null
+++ b/drivers/soc/fsl/rcpm.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rcpm.c - Freescale QorIQ RCPM driver
+//
+// Copyright 2019 NXP
+//
+// Author: Ran Wang <ran.wang_1@nxp.com>
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/kernel.h>
+
+#define RCPM_WAKEUP_CELL_MAX_SIZE 7
+
+struct rcpm {
+ unsigned int wakeup_cells;
+ void __iomem *ippdexpcr_base;
+ bool little_endian;
+};
+
+/**
+ * rcpm_pm_prepare - performs device-level tasks associated with power
+ * management, such as programming related to the wakeup source control.
+ * @dev: Device to handle.
+ *
+ */
+static int rcpm_pm_prepare(struct device *dev)
+{
+ int i, ret, idx;
+ void __iomem *base;
+ struct wakeup_source *ws;
+ struct rcpm *rcpm;
+ struct device_node *np = dev->of_node;
+ u32 value[RCPM_WAKEUP_CELL_MAX_SIZE + 1];
+ u32 setting[RCPM_WAKEUP_CELL_MAX_SIZE] = {0};
+
+ rcpm = dev_get_drvdata(dev);
+ if (!rcpm)
+ return -EINVAL;
+
+ base = rcpm->ippdexpcr_base;
+ idx = wakeup_sources_read_lock();
+
+ /* Begin with first registered wakeup source */
+ for_each_wakeup_source(ws) {
+
+ /* skip object which is not attached to device */
+ if (!ws->dev || !ws->dev->parent)
+ continue;
+
+ ret = device_property_read_u32_array(ws->dev->parent,
+ "fsl,rcpm-wakeup", value,
+ rcpm->wakeup_cells + 1);
+
+ /* Wakeup source should refer to current rcpm device */
+ if (ret || (np->phandle != value[0]))
+ continue;
+
+ /* Property "#fsl,rcpm-wakeup-cells" of rcpm node defines the
+ * number of IPPDEXPCR register cells, and "fsl,rcpm-wakeup"
+ * of wakeup source IP contains an integer array: <phandle to
+ * RCPM node, IPPDEXPCR0 setting, IPPDEXPCR1 setting,
+ * IPPDEXPCR2 setting, etc>.
+ *
+ * So we will go through them to collect setting data.
+ */
+ for (i = 0; i < rcpm->wakeup_cells; i++)
+ setting[i] |= value[i + 1];
+ }
+
+ wakeup_sources_read_unlock(idx);
+
+ /* Program all IPPDEXPCRn once */
+ for (i = 0; i < rcpm->wakeup_cells; i++) {
+ u32 tmp = setting[i];
+ void __iomem *address = base + i * 4;
+
+ if (!tmp)
+ continue;
+
+ /* We can only OR related bits */
+ if (rcpm->little_endian) {
+ tmp |= ioread32(address);
+ iowrite32(tmp, address);
+ } else {
+ tmp |= ioread32be(address);
+ iowrite32be(tmp, address);
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops rcpm_pm_ops = {
+ .prepare = rcpm_pm_prepare,
+};
+
+static int rcpm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct rcpm *rcpm;
+ int ret;
+
+ rcpm = devm_kzalloc(dev, sizeof(*rcpm), GFP_KERNEL);
+ if (!rcpm)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(rcpm->ippdexpcr_base)) {
+ ret = PTR_ERR(rcpm->ippdexpcr_base);
+ return ret;
+ }
+
+ rcpm->little_endian = device_property_read_bool(
+ &pdev->dev, "little-endian");
+
+ ret = device_property_read_u32(&pdev->dev,
+ "#fsl,rcpm-wakeup-cells", &rcpm->wakeup_cells);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(&pdev->dev, rcpm);
+
+ return 0;
+}
+
+static const struct of_device_id rcpm_of_match[] = {
+ { .compatible = "fsl,qoriq-rcpm-2.1+", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rcpm_of_match);
+
+static struct platform_driver rcpm_driver = {
+ .driver = {
+ .name = "rcpm",
+ .of_match_table = rcpm_of_match,
+ .pm = &rcpm_pm_ops,
+ },
+ .probe = rcpm_probe,
+};
+
+module_platform_driver(rcpm_driver);
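
rcpm_pm_prepare() walks every registered wakeup source, keeps only those whose "fsl,rcpm-wakeup" phandle points at this RCPM, and ORs their per-register settings into one array before touching the hardware. The aggregation step in isolation (phandles and bit values invented for the example):

#include <stdint.h>
#include <stdio.h>

#define WAKEUP_CELLS 2

int main(void)
{
	/* each row: <phandle, IPPDEXPCR0, IPPDEXPCR1>, as read from a
	 * hypothetical "fsl,rcpm-wakeup" property */
	uint32_t sources[][WAKEUP_CELLS + 1] = {
		{ 0x42, 0x00000100, 0x0 },
		{ 0x42, 0x00002000, 0x1 },
		{ 0x99, 0xffffffff, 0xf },	/* other RCPM: skipped */
	};
	uint32_t rcpm_phandle = 0x42;
	uint32_t setting[WAKEUP_CELLS] = { 0 };
	unsigned int i, j;

	for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
		if (sources[i][0] != rcpm_phandle)
			continue;	/* wakeup source refers elsewhere */
		for (j = 0; j < WAKEUP_CELLS; j++)
			setting[j] |= sources[i][j + 1];
	}
	for (j = 0; j < WAKEUP_CELLS; j++)
		printf("IPPDEXPCR%u |= 0x%08x\n", j, setting[j]);
	return 0;
}

The final register update then goes through ioread32/iowrite32 or the be variants depending on the "little-endian" property, so the OR-accumulate behaves the same on either bus endianness.
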
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 8aaebf1..05812f8 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,13 +8,15 @@
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
-config IMX_SCU_SOC
- bool "i.MX System Controller Unit SoC info support"
- depends on IMX_SCU
+config SOC_IMX8M
+ bool "i.MX8M SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
select SOC_BUS
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
help
- If you say yes here you get support for the NXP i.MX System
- Controller Unit SoC info module, it will provide the SoC info
- like SoC family, ID and revision etc.
+ If you say yes here you get support for the NXP i.MX8M family.
+ It will provide SoC info such as the SoC family,
+ ID and revision.
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index cf9ca42..078dc91 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_ARM),y)
+obj-$(CONFIG_ARCH_MXC) += soc-imx.o
+endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
-obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
-obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
+obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index b0dffb0..db7e7fc 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -14,6 +14,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
@@ -486,22 +487,17 @@
domain->regulator = devm_regulator_get_optional(domain->dev, "power");
if (IS_ERR(domain->regulator)) {
- if (PTR_ERR(domain->regulator) != -ENODEV) {
- if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
- dev_err(domain->dev, "Failed to get domain's regulator\n");
- return PTR_ERR(domain->regulator);
- }
+ if (PTR_ERR(domain->regulator) != -ENODEV)
+ return dev_err_probe(domain->dev, PTR_ERR(domain->regulator),
+ "Failed to get domain's regulator\n");
} else if (domain->voltage) {
regulator_set_voltage(domain->regulator,
domain->voltage, domain->voltage);
}
ret = imx_pgc_get_clocks(domain);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(domain->dev, "Failed to get domain's clocks\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(domain->dev, ret, "Failed to get domain's clocks\n");
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
deleted file mode 100644
index f638e77..0000000
--- a/drivers/soc/imx/soc-imx-scu.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2019 NXP.
- */
-
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
-#include <linux/slab.h>
-#include <linux/sys_soc.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
-
-static struct imx_sc_ipc *soc_ipc_handle;
-
-struct imx_sc_msg_misc_get_soc_id {
- struct imx_sc_rpc_msg hdr;
- union {
- struct {
- u32 control;
- u16 resource;
- } __packed req;
- struct {
- u32 id;
- } resp;
- } data;
-} __packed __aligned(4);
-
-struct imx_sc_msg_misc_get_soc_uid {
- struct imx_sc_rpc_msg hdr;
- u32 uid_low;
- u32 uid_high;
-} __packed;
-
-static ssize_t soc_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct imx_sc_msg_misc_get_soc_uid msg;
- struct imx_sc_rpc_msg *hdr = &msg.hdr;
- u64 soc_uid;
- int ret;
-
- hdr->ver = IMX_SC_RPC_VERSION;
- hdr->svc = IMX_SC_RPC_SVC_MISC;
- hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
- hdr->size = 1;
-
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
- if (ret) {
- pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
- return ret;
- }
-
- soc_uid = msg.uid_high;
- soc_uid <<= 32;
- soc_uid |= msg.uid_low;
-
- return sprintf(buf, "%016llX\n", soc_uid);
-}
-
-static DEVICE_ATTR_RO(soc_uid);
-
-static int imx_scu_soc_id(void)
-{
- struct imx_sc_msg_misc_get_soc_id msg;
- struct imx_sc_rpc_msg *hdr = &msg.hdr;
- int ret;
-
- hdr->ver = IMX_SC_RPC_VERSION;
- hdr->svc = IMX_SC_RPC_SVC_MISC;
- hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
- hdr->size = 3;
-
- msg.data.req.control = IMX_SC_C_ID;
- msg.data.req.resource = IMX_SC_R_SYSTEM;
-
- ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
- if (ret) {
- pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
- return ret;
- }
-
- return msg.data.resp.id;
-}
-
-static int imx_scu_soc_probe(struct platform_device *pdev)
-{
- struct soc_device_attribute *soc_dev_attr;
- struct soc_device *soc_dev;
- int id, ret;
- u32 val;
-
- ret = imx_scu_get_handle(&soc_ipc_handle);
- if (ret)
- return ret;
-
- soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
- GFP_KERNEL);
- if (!soc_dev_attr)
- return -ENOMEM;
-
- soc_dev_attr->family = "Freescale i.MX";
-
- ret = of_property_read_string(of_root,
- "model",
- &soc_dev_attr->machine);
- if (ret)
- return ret;
-
- id = imx_scu_soc_id();
- if (id < 0)
- return -EINVAL;
-
- /* format soc_id value passed from SCU firmware */
- val = id & 0x1f;
- soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
- if (!soc_dev_attr->soc_id)
- return -ENOMEM;
-
- /* format revision value passed from SCU firmware */
- val = (id >> 5) & 0xf;
- val = (((val >> 2) + 1) << 4) | (val & 0x3);
- soc_dev_attr->revision = kasprintf(GFP_KERNEL,
- "%d.%d",
- (val >> 4) & 0xf,
- val & 0xf);
- if (!soc_dev_attr->revision) {
- ret = -ENOMEM;
- goto free_soc_id;
- }
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
- goto free_revision;
- }
-
- ret = device_create_file(soc_device_to_device(soc_dev),
- &dev_attr_soc_uid);
- if (ret)
- goto free_revision;
-
- return 0;
-
-free_revision:
- kfree(soc_dev_attr->revision);
-free_soc_id:
- kfree(soc_dev_attr->soc_id);
- return ret;
-}
-
-static struct platform_driver imx_scu_soc_driver = {
- .driver = {
- .name = IMX_SCU_SOC_DRIVER_NAME,
- },
- .probe = imx_scu_soc_probe,
-};
-
-static int __init imx_scu_soc_init(void)
-{
- struct platform_device *pdev;
- struct device_node *np;
- int ret;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
- if (!np)
- return -ENODEV;
-
- of_node_put(np);
-
- ret = platform_driver_register(&imx_scu_soc_driver);
- if (ret)
- return ret;
-
- pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
- -1, NULL, 0);
- if (IS_ERR(pdev))
- platform_driver_unregister(&imx_scu_soc_driver);
-
- return PTR_ERR_OR_ZERO(pdev);
-}
-device_initcall(imx_scu_soc_init);
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
new file mode 100644
index 0000000..1e87802
--- /dev/null
+++ b/drivers/soc/imx/soc-imx.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/imx/cpu.h>
+#include <soc/imx/revision.h>
+
+#define OCOTP_UID_H 0x420
+#define OCOTP_UID_L 0x410
+
+#define OCOTP_ULP_UID_1 0x4b0
+#define OCOTP_ULP_UID_2 0x4c0
+#define OCOTP_ULP_UID_3 0x4d0
+#define OCOTP_ULP_UID_4 0x4e0
+
+static int __init imx_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const char *ocotp_compat = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *ocotp = NULL;
+ const char *soc_id;
+ u64 soc_uid = 0;
+ u32 val;
+ int ret;
+
+ /* Return early if this is running on devices with different SoCs */
+ if (!__mxc_cpu_type)
+ return 0;
+
+ if (of_machine_is_compatible("fsl,ls1021a"))
+ return 0;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+ if (ret)
+ goto free_soc;
+
+ switch (__mxc_cpu_type) {
+ case MXC_CPU_MX1:
+ soc_id = "i.MX1";
+ break;
+ case MXC_CPU_MX21:
+ soc_id = "i.MX21";
+ break;
+ case MXC_CPU_MX25:
+ soc_id = "i.MX25";
+ break;
+ case MXC_CPU_MX27:
+ soc_id = "i.MX27";
+ break;
+ case MXC_CPU_MX31:
+ soc_id = "i.MX31";
+ break;
+ case MXC_CPU_MX35:
+ soc_id = "i.MX35";
+ break;
+ case MXC_CPU_MX51:
+ soc_id = "i.MX51";
+ break;
+ case MXC_CPU_MX53:
+ soc_id = "i.MX53";
+ break;
+ case MXC_CPU_IMX6SL:
+ ocotp_compat = "fsl,imx6sl-ocotp";
+ soc_id = "i.MX6SL";
+ break;
+ case MXC_CPU_IMX6DL:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6DL";
+ break;
+ case MXC_CPU_IMX6SX:
+ ocotp_compat = "fsl,imx6sx-ocotp";
+ soc_id = "i.MX6SX";
+ break;
+ case MXC_CPU_IMX6Q:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6Q";
+ break;
+ case MXC_CPU_IMX6UL:
+ ocotp_compat = "fsl,imx6ul-ocotp";
+ soc_id = "i.MX6UL";
+ break;
+ case MXC_CPU_IMX6ULL:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULL";
+ break;
+ case MXC_CPU_IMX6ULZ:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULZ";
+ break;
+ case MXC_CPU_IMX6SLL:
+ ocotp_compat = "fsl,imx6sll-ocotp";
+ soc_id = "i.MX6SLL";
+ break;
+ case MXC_CPU_IMX7D:
+ ocotp_compat = "fsl,imx7d-ocotp";
+ soc_id = "i.MX7D";
+ break;
+ case MXC_CPU_IMX7ULP:
+ ocotp_compat = "fsl,imx7ulp-ocotp";
+ soc_id = "i.MX7ULP";
+ break;
+ case MXC_CPU_VF500:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF500";
+ break;
+ case MXC_CPU_VF510:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF510";
+ break;
+ case MXC_CPU_VF600:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF600";
+ break;
+ case MXC_CPU_VF610:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF610";
+ break;
+ default:
+ soc_id = "Unknown";
+ }
+ soc_dev_attr->soc_id = soc_id;
+
+ if (ocotp_compat) {
+ ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
+ if (IS_ERR(ocotp))
+ pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
+ }
+
+ if (!IS_ERR_OR_NULL(ocotp)) {
+ if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
+ regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
+ soc_uid = val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ } else {
+ regmap_read(ocotp, OCOTP_UID_H, &val);
+ soc_uid = val;
+ regmap_read(ocotp, OCOTP_UID_L, &val);
+ soc_uid <<= 32;
+ soc_uid |= val;
+ }
+ }
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (imx_get_soc_revision() >> 4) & 0xf,
+ imx_get_soc_revision() & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc;
+ }
+
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ return 0;
+
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return ret;
+}
+device_initcall(imx_soc_device_init);
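
soc-imx.c stitches the 64-bit serial number out of OCOTP words: two 32-bit halves on most parts, four 16-bit fuse words read UID_4 down to UID_1 on i.MX7ULP. The shifting is easy to model in isolation (fuse values invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* generic path: OCOTP_UID_H then OCOTP_UID_L */
	uint32_t uid_h = 0x12345678, uid_l = 0x9abcdef0;
	uint64_t soc_uid = ((uint64_t)uid_h << 32) | uid_l;

	/* i.MX7ULP path: low 16 bits of each of four fuse words */
	uint32_t ulp[4] = { 0xaaaa, 0xbbbb, 0xcccc, 0xdddd }; /* UID_4..1 */
	uint64_t ulp_uid = 0;
	int i;

	for (i = 0; i < 4; i++)
		ulp_uid = (ulp_uid << 16) | (ulp[i] & 0xffff);

	printf("%016llX\n%016llX\n",
	       (unsigned long long)soc_uid, (unsigned long long)ulp_uid);
	return 0;	/* 123456789ABCDEF0 and AAAABBBBCCCCDDDD */
}
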
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8m.c
similarity index 70%
rename from drivers/soc/imx/soc-imx8.c
rename to drivers/soc/imx/soc-imx8m.c
index b983157..cc57a38 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/platform_device.h>
+#include <linux/arm-smccc.h>
#include <linux/of.h>
#define REV_B1 0x21
@@ -16,9 +17,13 @@
#define IMX8MQ_SW_INFO_B1 0x40
#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+#define IMX_SIP_GET_SOC_INFO 0xc2000006
+
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
+#define IMX8MP_OCOTP_UID_OFFSET 0x10
+
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
@@ -29,40 +34,54 @@
static u64 soc_uid;
-static ssize_t soc_uid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#ifdef CONFIG_HAVE_ARM_SMCCC
+static u32 imx8mq_soc_revision_from_atf(void)
{
- return sprintf(buf, "%016llX\n", soc_uid);
-}
+ struct arm_smccc_res res;
-static DEVICE_ATTR_RO(soc_uid);
+ arm_smccc_smc(IMX_SIP_GET_SOC_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
+ return 0;
+ else
+ return res.a0 & 0xff;
+}
+#else
+static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; };
+#endif
static u32 __init imx8mq_soc_revision(void)
{
struct device_node *np;
void __iomem *ocotp_base;
u32 magic;
- u32 rev = 0;
+ u32 rev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
- goto out;
+ return 0;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
- magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
- if (magic == IMX8MQ_SW_MAGIC_B1)
- rev = REV_B1;
+ /*
+ * SOC revision on older imx8mq is not available in fuses, so query
+ * the value from ATF instead.
+ */
+ rev = imx8mq_soc_revision_from_atf();
+ if (!rev) {
+ magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
+ if (magic == IMX8MQ_SW_MAGIC_B1)
+ rev = REV_B1;
+ }
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
iounmap(ocotp_base);
-
-out:
of_node_put(np);
+
return rev;
}
@@ -70,6 +89,8 @@
{
void __iomem *ocotp_base;
struct device_node *np;
+ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ IMX8MP_OCOTP_UID_OFFSET : 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
@@ -78,9 +99,9 @@
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
iounmap(ocotp_base);
of_node_put(np);
@@ -124,10 +145,16 @@
.soc_revision = imx8mm_soc_revision,
};
-static const struct of_device_id imx8_soc_match[] = {
+static const struct imx8_soc_data imx8mp_soc_data = {
+ .name = "i.MX8MP",
+ .soc_revision = imx8mm_soc_revision,
+};
+
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
+ { .compatible = "fsl,imx8mp", .data = &imx8mp_soc_data, },
{ }
};
@@ -174,22 +201,28 @@
goto free_soc;
}
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- ret = PTR_ERR(soc_dev);
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
goto free_rev;
}
- ret = device_create_file(soc_device_to_device(soc_dev),
- &dev_attr_soc_uid);
- if (ret)
- goto free_rev;
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
+ soc_dev_attr->revision);
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
return 0;
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
free_rev:
if (strcmp(soc_dev_attr->revision, "unknown"))
kfree(soc_dev_attr->revision);
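
The revision byte handled in this file packs major and minor as nibbles: REV_B1 (0x21) reads as 2.1, matching the "%d.%d" formatting used in soc-imx.c above. The decode in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int rev = 0x21;	/* REV_B1, e.g. as returned via SMCCC */

	/* high nibble = major, low nibble = minor */
	printf("revision %u.%u\n", (rev >> 4) & 0xf, rev & 0xf);
	return 0;	/* prints "revision 2.1" */
}
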
diff --git a/drivers/soc/kendryte/Kconfig b/drivers/soc/kendryte/Kconfig
new file mode 100644
index 0000000..49785b1
--- /dev/null
+++ b/drivers/soc/kendryte/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_KENDRYTE
+
+config K210_SYSCTL
+ bool "Kendryte K210 system controller"
+ default y
+ depends on RISCV
+ help
+ Enables control of the various K210 clocks and general
+ purpose use of the extra 2MB of SRAM normally reserved
+ for the AI engine.
+
+endif
diff --git a/drivers/soc/kendryte/Makefile b/drivers/soc/kendryte/Makefile
new file mode 100644
index 0000000..002d9ce
--- /dev/null
+++ b/drivers/soc/kendryte/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
new file mode 100644
index 0000000..7070192
--- /dev/null
+++ b/drivers/soc/kendryte/k210-sysctl.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/bitfield.h>
+#include <asm/soc.h>
+
+#define K210_SYSCTL_CLK0_FREQ 26000000UL
+
+/* Registers base address */
+#define K210_SYSCTL_SYSCTL_BASE_ADDR 0x50440000ULL
+
+/* Registers */
+#define K210_SYSCTL_PLL0 0x08
+#define K210_SYSCTL_PLL1 0x0c
+/* clkr: 4bits, clkf1: 6bits, clkod: 4bits, bwadj: 4bits */
+#define PLL_RESET (1 << 20)
+#define PLL_PWR (1 << 21)
+#define PLL_INTFB (1 << 22)
+#define PLL_BYPASS (1 << 23)
+#define PLL_TEST (1 << 24)
+#define PLL_OUT_EN (1 << 25)
+#define PLL_TEST_EN (1 << 26)
+#define K210_SYSCTL_PLL_LOCK 0x18
+#define PLL0_LOCK1 (1 << 0)
+#define PLL0_LOCK2 (1 << 1)
+#define PLL0_SLIP_CLEAR (1 << 2)
+#define PLL0_TEST_CLK_OUT (1 << 3)
+#define PLL1_LOCK1 (1 << 8)
+#define PLL1_LOCK2 (1 << 9)
+#define PLL1_SLIP_CLEAR (1 << 10)
+#define PLL1_TEST_CLK_OUT (1 << 11)
+#define PLL2_LOCK1 (1 << 16)
+#define PLL2_LOCK2 (1 << 16)
+#define PLL2_SLIP_CLEAR (1 << 18)
+#define PLL2_TEST_CLK_OUT (1 << 19)
+#define K210_SYSCTL_CLKSEL0 0x20
+#define CLKSEL_ACLK (1 << 0)
+#define K210_SYSCTL_CLKEN_CENT 0x28
+#define CLKEN_CPU (1 << 0)
+#define CLKEN_SRAM0 (1 << 1)
+#define CLKEN_SRAM1 (1 << 2)
+#define CLKEN_APB0 (1 << 3)
+#define CLKEN_APB1 (1 << 4)
+#define CLKEN_APB2 (1 << 5)
+#define K210_SYSCTL_CLKEN_PERI 0x2c
+#define CLKEN_ROM (1 << 0)
+#define CLKEN_DMA (1 << 1)
+#define CLKEN_AI (1 << 2)
+#define CLKEN_DVP (1 << 3)
+#define CLKEN_FFT (1 << 4)
+#define CLKEN_GPIO (1 << 5)
+#define CLKEN_SPI0 (1 << 6)
+#define CLKEN_SPI1 (1 << 7)
+#define CLKEN_SPI2 (1 << 8)
+#define CLKEN_SPI3 (1 << 9)
+#define CLKEN_I2S0 (1 << 10)
+#define CLKEN_I2S1 (1 << 11)
+#define CLKEN_I2S2 (1 << 12)
+#define CLKEN_I2C0 (1 << 13)
+#define CLKEN_I2C1 (1 << 14)
+#define CLKEN_I2C2 (1 << 15)
+#define CLKEN_UART1 (1 << 16)
+#define CLKEN_UART2 (1 << 17)
+#define CLKEN_UART3 (1 << 18)
+#define CLKEN_AES (1 << 19)
+#define CLKEN_FPIO (1 << 20)
+#define CLKEN_TIMER0 (1 << 21)
+#define CLKEN_TIMER1 (1 << 22)
+#define CLKEN_TIMER2 (1 << 23)
+#define CLKEN_WDT0 (1 << 24)
+#define CLKEN_WDT1 (1 << 25)
+#define CLKEN_SHA (1 << 26)
+#define CLKEN_OTP (1 << 27)
+#define CLKEN_RTC (1 << 29)
+
+struct k210_sysctl {
+ void __iomem *regs;
+ struct clk_hw hw;
+};
+
+static void k210_set_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) | val, reg);
+}
+
+static void k210_clear_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) & ~val, reg);
+}
+
+static void k210_pll1_enable(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl(regs + K210_SYSCTL_PLL1);
+ val &= ~GENMASK(19, 0); /* clkr1 = 0 */
+ val |= FIELD_PREP(GENMASK(9, 4), 0x3B); /* clkf1 = 59 */
+ val |= FIELD_PREP(GENMASK(13, 10), 0x3); /* clkod1 = 3 */
+ val |= FIELD_PREP(GENMASK(19, 14), 0x3B); /* bwadj1 = 59 */
+ writel(val, regs + K210_SYSCTL_PLL1);
+
+ k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1);
+
+ /*
+ * Reset the pll. The magic NOPs come from the Kendryte reference SDK.
+ */
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ nop();
+ nop();
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+
+ for (;;) {
+ val = readl(regs + K210_SYSCTL_PLL_LOCK);
+ if (val & PLL1_LOCK2)
+ break;
+ writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK);
+ }
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1);
+}
+
+static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw);
+ u32 clksel0, pll0;
+ u64 pll0_freq, clkr0, clkf0, clkod0;
+
+ /*
+ * If the clock selector is not set, use the base frequency.
+ * Otherwise, use PLL0 frequency with a frequency divisor.
+ */
+ clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0);
+ if (!(clksel0 & CLKSEL_ACLK))
+ return K210_SYSCTL_CLK0_FREQ;
+
+ /*
+ * Get PLL0 frequency:
+ * freq = base frequency * clkf0 / (clkr0 * clkod0)
+ */
+ pll0 = readl(s->regs + K210_SYSCTL_PLL0);
+ clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0);
+ clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0);
+ clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0);
+ pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0);
+
+ /* Get the frequency divisor from the clock selector */
+ return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0));
+}
+
+static const struct clk_ops k210_sysctl_clk_ops = {
+ .recalc_rate = k210_sysctl_clk_recalc_rate,
+};
+
+static const struct clk_init_data k210_clk_init_data = {
+ .name = "k210-sysctl-pll1",
+ .ops = &k210_sysctl_clk_ops,
+};
+
+static int k210_sysctl_probe(struct platform_device *pdev)
+{
+ struct k210_sysctl *s;
+ int error;
+
+ pr_info("Kendryte K210 SoC sysctl\n");
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->regs = devm_ioremap_resource(&pdev->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(s->regs))
+ return PTR_ERR(s->regs);
+
+ s->hw.init = &k210_clk_init_data;
+ error = devm_clk_hw_register(&pdev->dev, &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register clk\n");
+ return error;
+ }
+
+ error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "adding clk provider failed\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id k210_sysctl_of_match[] = {
+ { .compatible = "kendryte,k210-sysctl", },
+ {}
+};
+
+static struct platform_driver k210_sysctl_driver = {
+ .driver = {
+ .name = "k210-sysctl",
+ .of_match_table = k210_sysctl_of_match,
+ },
+ .probe = k210_sysctl_probe,
+};
+
+static int __init k210_sysctl_init(void)
+{
+ return platform_driver_register(&k210_sysctl_driver);
+}
+core_initcall(k210_sysctl_init);
+
+/*
+ * This needs to be called very early during initialization, given that
+ * PLL1 needs to be enabled to be able to use all SRAM.
+ */
+static void __init k210_soc_early_init(const void *fdt)
+{
+ void __iomem *regs;
+
+ regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000);
+ if (!regs)
+ panic("K210 sysctl ioremap");
+
+ /* Enable PLL1 to make the KPU SRAM useable */
+ k210_pll1_enable(regs);
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0);
+
+ k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1,
+ regs + K210_SYSCTL_CLKEN_CENT);
+ k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC,
+ regs + K210_SYSCTL_CLKEN_PERI);
+
+ k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0);
+
+ iounmap(regs);
+}
+SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
+
+#ifdef CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN
+/*
+ * Generic entry for the default k210.dtb embedded DTB for boards with:
+ * - Vendor ID: 0x4B5
+ * - Arch ID: 0xE59889E6A5A04149 (= "Canaan AI" in UTF-8 encoded Chinese)
+ * - Impl ID: 0x4D41495832303030 (= "MAIX2000")
+ * These values are reported by the SiPEED MAXDUINO, SiPEED MAIX GO and
+ * SiPEED Dan dock boards.
+ */
+SOC_BUILTIN_DTB_DECLARE(k210, 0x4B5, 0xE59889E6A5A04149, 0x4D41495832303030);
+#endif
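
k210_sysctl_clk_recalc_rate() derives the PLL0 rate as IN0 * clkf0 / (clkr0 * clkod0), each factor being its fuse field plus one, and then divides by the ACLK selector. A standalone check of that arithmetic (the PLL0 register value is constructed for the example, not dumped from hardware):

#include <stdint.h>
#include <stdio.h>

#define CLK0_FREQ 26000000ULL

static uint32_t field(uint32_t reg, int hi, int lo)
{
	return (reg >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	/* hypothetical PLL0 value: clkr0 field 0, clkf0 field 59,
	 * clkod0 field 1 in bits [3:0], [9:4], [13:10] */
	uint32_t pll0 = (59u << 4) | (1u << 10);
	uint64_t clkr0 = 1 + field(pll0, 3, 0);
	uint64_t clkf0 = 1 + field(pll0, 9, 4);
	uint64_t clkod0 = 1 + field(pll0, 13, 10);
	uint64_t pll0_freq = clkf0 * CLK0_FREQ / (clkr0 * clkod0);

	/* CLKSEL0 bits [2:1] select the ACLK divisor 2 << n */
	uint32_t clksel0 = 0x1;	/* ACLK from PLL0, divisor field 0 */
	uint64_t aclk = pll0_freq / (2ULL << field(clksel0, 2, 1));

	printf("pll0 %llu Hz, aclk %llu Hz\n",
	       (unsigned long long)pll0_freq, (unsigned long long)aclk);
	return 0;	/* 780 MHz and 390 MHz */
}

With the PLL1 fields programmed by k210_pll1_enable() (clkr1 = 0, clkf1 = 59, clkod1 = 3), the same formula gives 26 MHz * 60 / (1 * 4) = 390 MHz.
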
diff --git a/drivers/soc/lantiq/fpi-bus.c b/drivers/soc/lantiq/fpi-bus.c
index cb0303a..dff1375 100644
--- a/drivers/soc/lantiq/fpi-bus.c
+++ b/drivers/soc/lantiq/fpi-bus.c
@@ -28,14 +28,12 @@
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res_xbar;
struct regmap *rcu_regmap;
void __iomem *xbar_membase;
u32 rcu_ahb_endianness_reg_offset;
int ret;
- res_xbar = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xbar_membase = devm_ioremap_resource(dev, res_xbar);
+ xbar_membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xbar_membase))
return PTR_ERR(xbar_membase);
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 2114b56..59a56cd 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -44,4 +44,11 @@
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_MMSYS
+ bool "MediaTek MMSYS Support"
+ default ARCH_MEDIATEK
+ help
+ Say yes here to add support for the MediaTek Multimedia
+ Subsystem (MMSYS).
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index b017330..01f9f87 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -3,3 +3,4 @@
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 34eec26..505651b 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -9,11 +9,66 @@
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
-#define CMDQ_ARG_A_WRITE_MASK 0xffff
#define CMDQ_WRITE_ENABLE_MASK BIT(0)
+#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
-#define CMDQ_EOC_CMD ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
- << 32 | CMDQ_EOC_IRQ_EN)
+#define CMDQ_REG_TYPE 1
+#define CMDQ_JUMP_RELATIVE 1
+
+struct cmdq_instruction {
+ union {
+ u32 value;
+ u32 mask;
+ struct {
+ u16 arg_c;
+ u16 src_reg;
+ };
+ };
+ union {
+ u16 offset;
+ u16 event;
+ u16 reg_dst;
+ };
+ union {
+ u8 subsys;
+ struct {
+ u8 sop:5;
+ u8 arg_c_t:1;
+ u8 src_t:1;
+ u8 dst_t:1;
+ };
+ };
+ u8 op;
+};
+
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ struct of_phandle_args spec;
+ int err;
+
+ if (!client_reg)
+ return -ENOENT;
+
+ err = of_parse_phandle_with_fixed_args(dev->of_node,
+ "mediatek,gce-client-reg",
+ 3, idx, &spec);
+ if (err < 0) {
+ dev_err(dev,
+ "error %d can't parse gce-client-reg property (%d)",
+ err, idx);
+
+ return err;
+ }
+
+ client_reg->subsys = (u8)spec.args[0];
+ client_reg->offset = (u16)spec.args[1];
+ client_reg->size = (u16)spec.args[2];
+ of_node_put(spec.np);
+
+ return 0;
+}
+EXPORT_SYMBOL(cmdq_dev_get_client_reg);
static void cmdq_client_timeout(struct timer_list *t)
{
@@ -111,10 +166,10 @@
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
-static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
- u32 arg_a, u32 arg_b)
+static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
+ struct cmdq_instruction inst)
{
- u64 *cmd_ptr;
+ struct cmdq_instruction *cmd_ptr;
if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
/*
@@ -130,8 +185,9 @@
__func__, (u32)pkt->buf_size);
return -ENOMEM;
}
+
cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
- (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
+ *cmd_ptr = inst;
pkt->cmd_buf_size += CMDQ_INST_SIZE;
return 0;
@@ -139,71 +195,253 @@
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
- u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
- (subsys << CMDQ_SUBSYS_SHIFT);
+ struct cmdq_instruction inst;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
+ inst.op = CMDQ_CODE_WRITE;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask)
{
- u32 offset_mask = offset;
- int err = 0;
+ struct cmdq_instruction inst = { {0} };
+ u16 offset_mask = offset;
+ int err;
if (mask != 0xffffffff) {
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
offset_mask |= CMDQ_WRITE_ENABLE_MASK;
}
- err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
+ err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
-int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
+int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
+ u16 reg_idx)
{
- u32 arg_b;
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_READ_S;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.reg_dst = reg_idx;
+ inst.src_reg = addr_low;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_read_s);
+
+int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s);
+
+int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.mask = 0;
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.src_t = CMDQ_REG_TYPE;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.src_reg = src_reg_idx;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
+
+int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_WRITE_S;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_value);
+
+int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = {};
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ inst.op = CMDQ_CODE_WRITE_S_MASK;
+ inst.sop = high_addr_reg_idx;
+ inst.offset = addr_low;
+ inst.value = value;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
+
+int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
+{
+ struct cmdq_instruction inst = { {0} };
+ u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- /*
- * WFE arg_b
- * bit 0-11: wait value
- * bit 15: 1 - wait, 0 - no wait
- * bit 16-27: update value
- * bit 31: 1 - update, 0 - no update
- */
- arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_OPTION | clear_option;
+ inst.event = event;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
+ struct cmdq_instruction inst = { {0} };
+
if (event >= CMDQ_MAX_EVENT)
return -EINVAL;
- return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
- CMDQ_WFE_UPDATE);
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
-static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
+ struct cmdq_instruction inst = {};
+
+ if (event >= CMDQ_MAX_EVENT)
+ return -EINVAL;
+
+ inst.op = CMDQ_CODE_WFE;
+ inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
+ inst.event = event;
+
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_set_event);
+
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ struct cmdq_instruction inst = { {0} };
int err;
- /* insert EOC and generate IRQ for each command iteration */
- err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);
-
- /* JUMP to end */
- err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);
+ inst.op = CMDQ_CODE_POLL;
+ inst.value = value;
+ inst.offset = offset;
+ inst.subsys = subsys;
+ err = cmdq_pkt_append_command(pkt, inst);
return err;
}
+EXPORT_SYMBOL(cmdq_pkt_poll);
+
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ inst.op = CMDQ_CODE_MASK;
+ inst.mask = ~mask;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ offset = offset | CMDQ_POLL_ENABLE_MASK;
+ err = cmdq_pkt_poll(pkt, subsys, offset, value);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_poll_mask);
+
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_LOGIC;
+ inst.dst_t = CMDQ_REG_TYPE;
+ inst.reg_dst = reg_idx;
+ inst.value = value;
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_assign);
+
+int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
+{
+ struct cmdq_instruction inst = {};
+
+ inst.op = CMDQ_CODE_JUMP;
+ inst.offset = CMDQ_JUMP_RELATIVE;
+ inst.value = addr >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ return cmdq_pkt_append_command(pkt, inst);
+}
+EXPORT_SYMBOL(cmdq_pkt_jump);
+
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
+{
+ struct cmdq_instruction inst = { {0} };
+ int err;
+
+ /* insert EOC and generate IRQ for each command iteration */
+ inst.op = CMDQ_CODE_EOC;
+ inst.value = CMDQ_EOC_IRQ_EN;
+ err = cmdq_pkt_append_command(pkt, inst);
+ if (err < 0)
+ return err;
+
+ /* JUMP to end */
+ inst.op = CMDQ_CODE_JUMP;
+ inst.value = CMDQ_JUMP_PASS >>
+ cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
+ err = cmdq_pkt_append_command(pkt, inst);
+
+ return err;
+}
+EXPORT_SYMBOL(cmdq_pkt_finalize);
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
@@ -238,10 +476,6 @@
unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- err = cmdq_pkt_finalize(pkt);
- if (err < 0)
- return err;
-
pkt->cb.cb = cb;
pkt->cb.data = data;
pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
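
This file stops hand-assembling 64-bit command words; each cmdq_instruction must therefore still pack to exactly 8 bytes, with the value word low and the opcode in the top byte, matching what the old shift-and-or code emitted. A simplified standalone sketch (layout abridged from the hunk above; the CMDQ_CODE_WRITE value and subsys id are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_cmdq_instruction {
	union {
		uint32_t value;
		uint32_t mask;
	};
	union {
		uint16_t offset;
		uint16_t event;
	};
	uint8_t subsys;
	uint8_t op;
};

int main(void)
{
	struct fake_cmdq_instruction inst = { { 0 } };
	uint64_t raw;

	_Static_assert(sizeof(inst) == 8, "one GCE instruction is 64 bits");

	inst.op = 0x04;		/* assumed CMDQ_CODE_WRITE encoding */
	inst.subsys = 0x14;	/* hypothetical subsys id */
	inst.offset = 0x0040;
	inst.value = 0xdeadbeef;

	memcpy(&raw, &inst, sizeof(raw));
	printf("encoded: 0x%016llx\n", (unsigned long long)raw);
	return 0;
}

The WFE comment deleted above still documents the value word cmdq_pkt_wfe() now builds from CMDQ_WFE_OPTION: bits 0-11 wait value, bit 15 wait, bits 16-27 update value, bit 31 update.
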
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 341c7ac..4a12379 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -19,7 +19,7 @@
/**
* mtk_infracfg_set_bus_protection - enable bus protection
- * @regmap: The infracfg regmap
+ * @infracfg: The infracfg regmap
* @mask: The mask containing the protection bits to be enabled.
* @reg_update: The boolean flag determines to set the protection bits
* by regmap_update_bits with enable register(PROTECTEN) or
@@ -50,7 +50,7 @@
/**
* mtk_infracfg_clear_bus_protection - disable bus protection
- * @regmap: The infracfg regmap
+ * @infracfg: The infracfg regmap
* @mask: The mask containing the protection bits to be disabled.
* @reg_update: The boolean flag determines to clear the protection bits
* by regmap_update_bits with enable register(PROTECTEN) or
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
new file mode 100644
index 0000000..36ad66b
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
+#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
+#define COLOR1_SEL_IN_OVL1 0x1
+
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
+#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DSI_SEL_IN_RDMA 0x1
+
+struct mtk_mmsys_driver_data {
+ const char *clk_driver;
+};
+
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .clk_driver = "clk-mt2701-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .clk_driver = "clk-mt2712-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
+ .clk_driver = "clk-mt6779-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
+ .clk_driver = "clk-mt6797-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .clk_driver = "clk-mt8173-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .clk_driver = "clk-mt8183-mm",
+};
+
+static unsigned int mtk_mmsys_ddp_mout_en(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+ value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD_MOUT_EN_RDMA0;
+ } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
+ value = UFOE_MOUT_EN_DSI0;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
+ value = OVL1_MOUT_EN_COLOR1;
+ } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
+ value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static unsigned int mtk_mmsys_ddp_sel_in(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+ value = COLOR0_SEL_IN_OVL0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
+ value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static void mtk_mmsys_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
+ writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ writel_relaxed(DSI_SEL_IN_RDMA,
+ config_regs + DISP_REG_CONFIG_DSI_SEL);
+ writel_relaxed(DPI_SEL_IN_BLS,
+ config_regs + DISP_REG_CONFIG_DPI_SEL);
+ }
+}
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ mtk_mmsys_ddp_sout_sel(config_regs, cur, next);
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+
+static int mtk_mmsys_probe(struct platform_device *pdev)
+{
+ const struct mtk_mmsys_driver_data *data;
+ struct device *dev = &pdev->dev;
+ struct platform_device *clks;
+ struct platform_device *drm;
+ void __iomem *config_regs;
+ struct resource *mem;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ config_regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(config_regs)) {
+ ret = PTR_ERR(config_regs);
+ dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, config_regs);
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ clks = platform_device_register_data(&pdev->dev, data->clk_driver,
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(clks))
+ return PTR_ERR(clks);
+
+ drm = platform_device_register_data(&pdev->dev, "mediatek-drm",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(drm)) {
+ platform_device_unregister(clks);
+ return PTR_ERR(drm);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_match_mtk_mmsys[] = {
+ {
+ .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6779-mmsys",
+ .data = &mt6779_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6797-mmsys",
+ .data = &mt6797_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-mmsys",
+ .data = &mt8183_mmsys_driver_data,
+ },
+ { }
+};
+
+static struct platform_driver mtk_mmsys_drv = {
+ .driver = {
+ .name = "mtk-mmsys",
+ .of_match_table = of_match_mtk_mmsys,
+ },
+ .probe = mtk_mmsys_probe,
+};
+
+builtin_platform_driver(mtk_mmsys_drv);
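The two exported helpers are the whole client API of this file; a sketch of how a display-pipeline binder might use them (the component pair mirrors the OVL0 -> COLOR0 case handled above; the device pointer must be the mmsys platform device):

    /* Hypothetical caller, e.g. from DRM path binding/unbinding */
    static void example_bind_path(struct device *mmsys_dev)
    {
            /* sets the OVL0 MOUT_EN and COLOR0 SEL_IN routing bits */
            mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_OVL0,
                                  DDP_COMPONENT_COLOR0);
    }

    static void example_unbind_path(struct device *mmsys_dev)
    {
            /* clears the same routing bits again */
            mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_OVL0,
                                     DDP_COMPONENT_COLOR0);
    }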
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index c725315..5d34e8b 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -111,6 +111,28 @@
PWRAP_RG_SPI_CON13,
PWRAP_SPISLV_KEY,
+ /* MT6359 only regs */
+ PWRAP_DEW_CRC_SWRST,
+ PWRAP_DEW_RG_EN_RECORD,
+ PWRAP_DEW_RECORD_CMD0,
+ PWRAP_DEW_RECORD_CMD1,
+ PWRAP_DEW_RECORD_CMD2,
+ PWRAP_DEW_RECORD_CMD3,
+ PWRAP_DEW_RECORD_CMD4,
+ PWRAP_DEW_RECORD_CMD5,
+ PWRAP_DEW_RECORD_WDATA0,
+ PWRAP_DEW_RECORD_WDATA1,
+ PWRAP_DEW_RECORD_WDATA2,
+ PWRAP_DEW_RECORD_WDATA3,
+ PWRAP_DEW_RECORD_WDATA4,
+ PWRAP_DEW_RECORD_WDATA5,
+ PWRAP_DEW_RG_ADDR_TARGET,
+ PWRAP_DEW_RG_ADDR_MASK,
+ PWRAP_DEW_RG_WDATA_TARGET,
+ PWRAP_DEW_RG_WDATA_MASK,
+ PWRAP_DEW_RG_SPI_RECORD_CLR,
+ PWRAP_DEW_RG_CMD_ALERT_CLR,
+
/* MT6397 only regs */
PWRAP_DEW_EVENT_OUT_EN,
PWRAP_DEW_EVENT_SRC_EN,
@@ -197,6 +219,42 @@
[PWRAP_SPISLV_KEY] = 0x044a,
};
+static const u32 mt6359_regs[] = {
+ [PWRAP_DEW_RG_EN_RECORD] = 0x040a,
+ [PWRAP_DEW_DIO_EN] = 0x040c,
+ [PWRAP_DEW_READ_TEST] = 0x040e,
+ [PWRAP_DEW_WRITE_TEST] = 0x0410,
+ [PWRAP_DEW_CRC_SWRST] = 0x0412,
+ [PWRAP_DEW_CRC_EN] = 0x0414,
+ [PWRAP_DEW_CRC_VAL] = 0x0416,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0418,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x041a,
+ [PWRAP_DEW_CIPHER_EN] = 0x041c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x041e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x0420,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x0422,
+ [PWRAP_DEW_RDDMY_NO] = 0x0424,
+ [PWRAP_DEW_RECORD_CMD0] = 0x0428,
+ [PWRAP_DEW_RECORD_CMD1] = 0x042a,
+ [PWRAP_DEW_RECORD_CMD2] = 0x042c,
+ [PWRAP_DEW_RECORD_CMD3] = 0x042e,
+ [PWRAP_DEW_RECORD_CMD4] = 0x0430,
+ [PWRAP_DEW_RECORD_CMD5] = 0x0432,
+ [PWRAP_DEW_RECORD_WDATA0] = 0x0434,
+ [PWRAP_DEW_RECORD_WDATA1] = 0x0436,
+ [PWRAP_DEW_RECORD_WDATA2] = 0x0438,
+ [PWRAP_DEW_RECORD_WDATA3] = 0x043a,
+ [PWRAP_DEW_RECORD_WDATA4] = 0x043c,
+ [PWRAP_DEW_RECORD_WDATA5] = 0x043e,
+ [PWRAP_DEW_RG_ADDR_TARGET] = 0x0440,
+ [PWRAP_DEW_RG_ADDR_MASK] = 0x0442,
+ [PWRAP_DEW_RG_WDATA_TARGET] = 0x0444,
+ [PWRAP_DEW_RG_WDATA_MASK] = 0x0446,
+ [PWRAP_DEW_RG_SPI_RECORD_CLR] = 0x0448,
+ [PWRAP_DEW_RG_CMD_ALERT_CLR] = 0x0448,
+ [PWRAP_SPISLV_KEY] = 0x044a,
+};
+
static const u32 mt6397_regs[] = {
[PWRAP_DEW_BASE] = 0xbc00,
[PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
@@ -497,6 +555,45 @@
[PWRAP_DCM_DBC_PRD] = 0x1E0,
};
+static int mt6779_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_RDDMY] = 0x20,
+ [PWRAP_CSHEXT_WRITE] = 0x24,
+ [PWRAP_CSHEXT_READ] = 0x28,
+ [PWRAP_CSLEXT_WRITE] = 0x2C,
+ [PWRAP_CSLEXT_READ] = 0x30,
+ [PWRAP_EXT_CK_WRITE] = 0x34,
+ [PWRAP_STAUPD_CTRL] = 0x3C,
+ [PWRAP_STAUPD_GRPEN] = 0x40,
+ [PWRAP_EINT_STA0_ADR] = 0x44,
+ [PWRAP_HARB_HPRIO] = 0x68,
+ [PWRAP_HIPRIO_ARB_EN] = 0x6C,
+ [PWRAP_MAN_EN] = 0x7C,
+ [PWRAP_MAN_CMD] = 0x80,
+ [PWRAP_WACS0_EN] = 0x8C,
+ [PWRAP_INIT_DONE0] = 0x90,
+ [PWRAP_WACS1_EN] = 0x94,
+ [PWRAP_WACS2_EN] = 0x9C,
+ [PWRAP_INIT_DONE1] = 0x98,
+ [PWRAP_INIT_DONE2] = 0xA0,
+ [PWRAP_INT_EN] = 0xBC,
+ [PWRAP_INT_FLG_RAW] = 0xC0,
+ [PWRAP_INT_FLG] = 0xC4,
+ [PWRAP_INT_CLR] = 0xC8,
+ [PWRAP_INT1_EN] = 0xCC,
+ [PWRAP_INT1_FLG] = 0xD4,
+ [PWRAP_INT1_CLR] = 0xD8,
+ [PWRAP_TIMER_EN] = 0xF0,
+ [PWRAP_WDT_UNIT] = 0xF8,
+ [PWRAP_WDT_SRC_EN] = 0xFC,
+ [PWRAP_WDT_SRC_EN_1] = 0x100,
+ [PWRAP_WACS2_CMD] = 0xC20,
+ [PWRAP_WACS2_RDATA] = 0xC24,
+ [PWRAP_WACS2_VLDCLR] = 0xC28,
+};
+
static int mt6797_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -938,6 +1035,7 @@
PMIC_MT6351,
PMIC_MT6357,
PMIC_MT6358,
+ PMIC_MT6359,
PMIC_MT6380,
PMIC_MT6397,
};
@@ -945,6 +1043,7 @@
enum pwrap_type {
PWRAP_MT2701,
PWRAP_MT6765,
+ PWRAP_MT6779,
PWRAP_MT6797,
PWRAP_MT7622,
PWRAP_MT8135,
@@ -1377,6 +1476,7 @@
break;
case PWRAP_MT2701:
case PWRAP_MT6765:
+ case PWRAP_MT6779:
case PWRAP_MT6797:
case PWRAP_MT8173:
case PWRAP_MT8516:
@@ -1711,6 +1811,15 @@
.pwrap_write = pwrap_write16,
};
+static const struct pwrap_slv_type pmic_mt6359 = {
+ .dew_regs = mt6359_regs,
+ .type = PMIC_MT6359,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_DUALIO,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
static const struct pwrap_slv_type pmic_mt6380 = {
.dew_regs = NULL,
.type = PMIC_MT6380,
@@ -1744,6 +1853,9 @@
.compatible = "mediatek,mt6358",
.data = &pmic_mt6358,
}, {
+ .compatible = "mediatek,mt6359",
+ .data = &pmic_mt6359,
+ }, {
/* The MT6380 PMIC only implements a regulator, so we bind it
* directly instead of using a MFD.
*/
@@ -1783,6 +1895,19 @@
.init_soc_specific = NULL,
};
+static const struct pmic_wrapper_type pwrap_mt6779 = {
+ .regs = mt6779_regs,
+ .type = PWRAP_MT6779,
+ .arb_en_all = 0xfbb7f,
+ .int_en_all = 0xfffffffe,
+ .int1_en_all = 0,
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct pmic_wrapper_type pwrap_mt6797 = {
.regs = mt6797_regs,
.type = PWRAP_MT6797,
@@ -1868,6 +1993,9 @@
.compatible = "mediatek,mt6765-pwrap",
.data = &pwrap_mt6765,
}, {
+ .compatible = "mediatek,mt6779-pwrap",
+ .data = &pwrap_mt6779,
+ }, {
.compatible = "mediatek,mt6797-pwrap",
.data = &pwrap_mt6797,
}, {
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 75f25f0..ca75b14 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -21,7 +21,7 @@
#include <dt-bindings/power/mt8173-power.h>
#define MTK_POLL_DELAY_US 10
-#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+#define MTK_POLL_TIMEOUT USEC_PER_SEC
#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
#define MTK_SCPD_FWAIT_SRAM BIT(1)
@@ -108,6 +108,17 @@
#define MAX_CLKS 3
+/**
+ * struct scp_domain_data - scp domain data for power on/off flow
+ * @name: The domain name.
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @bus_prot_mask: The mask for single step bus protection.
+ * @clk_id: The basic clocks required by this power domain.
+ * @caps: The flag for active wake-up action.
+ */
struct scp_domain_data {
const char *name;
u32 sta_mask;
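An illustrative initializer for the structure documented above; every value here is a placeholder invented for the example, not taken from this patch:

    /* Hypothetical domain description; masks and offsets are invented */
    static const struct scp_domain_data example_domain = {
            .name = "example",
            .sta_mask = BIT(7),
            .ctl_offs = 0x0230,
            .sram_pdn_bits = GENMASK(11, 8),
            .sram_pdn_ack_bits = GENMASK(15, 12),
            .bus_prot_mask = BIT(1) | BIT(2),
            .clk_id = {CLK_MM},             /* basic clock(s) for the domain */
            .caps = MTK_SCPD_ACTIVE_WAKEUP,
    };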
@@ -180,32 +191,132 @@
return -EINVAL;
}
+static int scpsys_regulator_enable(struct scp_domain *scpd)
+{
+ if (!scpd->supply)
+ return 0;
+
+ return regulator_enable(scpd->supply);
+}
+
+static int scpsys_regulator_disable(struct scp_domain *scpd)
+{
+ if (!scpd->supply)
+ return 0;
+
+ return regulator_disable(scpd->supply);
+}
+
+static void scpsys_clk_disable(struct clk *clk[], int max_num)
+{
+ int i;
+
+ for (i = max_num - 1; i >= 0; i--)
+ clk_disable_unprepare(clk[i]);
+}
+
+static int scpsys_clk_enable(struct clk *clk[], int max_num)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < max_num && clk[i]; i++) {
+ ret = clk_prepare_enable(clk[i]);
+ if (ret) {
+ scpsys_clk_disable(clk, i);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int scpsys_sram_enable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+ u32 val;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ int tmp;
+
+ val = readl(ctl_addr);
+ val &= ~scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
+ if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
+ /*
+ * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
+ * MT7622_POWER_DOMAIN_WB and thus just a trivial setup
+ * is applied here.
+ */
+ usleep_range(12000, 12100);
+ } else {
+ /* Wait until the SRAM_PDN_ACK bits are all cleared */
+ int ret = readl_poll_timeout(ctl_addr, tmp,
+ (tmp & pdn_ack) == 0,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_sram_disable(struct scp_domain *scpd, void __iomem *ctl_addr)
+{
+ u32 val;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
+ int tmp;
+
+ val = readl(ctl_addr);
+ val |= scpd->data->sram_pdn_bits;
+ writel(val, ctl_addr);
+
+ /* Wait until the SRAM_PDN_ACK bits are all set */
+ return readl_poll_timeout(ctl_addr, tmp,
+ (tmp & pdn_ack) == pdn_ack,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+}
+
+static int scpsys_bus_protect_enable(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ if (!scpd->data->bus_prot_mask)
+ return 0;
+
+ return mtk_infracfg_set_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+}
+
+static int scpsys_bus_protect_disable(struct scp_domain *scpd)
+{
+ struct scp *scp = scpd->scp;
+
+ if (!scpd->data->bus_prot_mask)
+ return 0;
+
+ return mtk_infracfg_clear_bus_protection(scp->infracfg,
+ scpd->data->bus_prot_mask,
+ scp->bus_prot_reg_update);
+}
+
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
- u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret, tmp;
- int i;
- if (scpd->supply) {
- ret = regulator_enable(scpd->supply);
- if (ret)
- return ret;
- }
+ ret = scpsys_regulator_enable(scpd);
+ if (ret < 0)
+ return ret;
- for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) {
- ret = clk_prepare_enable(scpd->clk[i]);
- if (ret) {
- for (--i; i >= 0; i--)
- clk_disable_unprepare(scpd->clk[i]);
+ ret = scpsys_clk_enable(scpd->clk, MAX_CLKS);
+ if (ret)
+ goto err_clk;
- goto err_clk;
- }
- }
-
+ /* subsys power on */
val = readl(ctl_addr);
val |= PWR_ON_BIT;
writel(val, ctl_addr);
@@ -227,43 +338,20 @@
val |= PWR_RST_B_BIT;
writel(val, ctl_addr);
- val &= ~scpd->data->sram_pdn_bits;
- writel(val, ctl_addr);
+ ret = scpsys_sram_enable(scpd, ctl_addr);
+ if (ret < 0)
+ goto err_pwr_ack;
- /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */
- if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) {
- /*
- * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for
- * MT7622_POWER_DOMAIN_WB and thus just a trivial setup is
- * applied here.
- */
- usleep_range(12000, 12100);
-
- } else {
- ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0,
- MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
- if (ret < 0)
- goto err_pwr_ack;
- }
-
- if (scpd->data->bus_prot_mask) {
- ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
- scpd->data->bus_prot_mask,
- scp->bus_prot_reg_update);
- if (ret)
- goto err_pwr_ack;
- }
+ ret = scpsys_bus_protect_disable(scpd);
+ if (ret < 0)
+ goto err_pwr_ack;
return 0;
err_pwr_ack:
- for (i = MAX_CLKS - 1; i >= 0; i--) {
- if (scpd->clk[i])
- clk_disable_unprepare(scpd->clk[i]);
- }
+ scpsys_clk_disable(scpd->clk, MAX_CLKS);
err_clk:
- if (scpd->supply)
- regulator_disable(scpd->supply);
+ scpsys_regulator_disable(scpd);
dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
@@ -275,29 +363,19 @@
struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
struct scp *scp = scpd->scp;
void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
- u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret, tmp;
- int i;
- if (scpd->data->bus_prot_mask) {
- ret = mtk_infracfg_set_bus_protection(scp->infracfg,
- scpd->data->bus_prot_mask,
- scp->bus_prot_reg_update);
- if (ret)
- goto out;
- }
-
- val = readl(ctl_addr);
- val |= scpd->data->sram_pdn_bits;
- writel(val, ctl_addr);
-
- /* wait until SRAM_PDN_ACK all 1 */
- ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack,
- MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ ret = scpsys_bus_protect_enable(scpd);
if (ret < 0)
goto out;
+ ret = scpsys_sram_disable(scpd, ctl_addr);
+ if (ret < 0)
+ goto out;
+
+ /* subsys power off */
+ val = readl(ctl_addr);
val |= PWR_ISO_BIT;
writel(val, ctl_addr);
@@ -319,11 +397,11 @@
if (ret < 0)
goto out;
- for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++)
- clk_disable_unprepare(scpd->clk[i]);
+ scpsys_clk_disable(scpd->clk, MAX_CLKS);
- if (scpd->supply)
- regulator_disable(scpd->supply);
+ ret = scpsys_regulator_disable(scpd);
+ if (ret < 0)
+ goto out;
return 0;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 661e47a..6a3b69b 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -26,6 +26,22 @@
resource on a RPM-hardened platform must use this database to get
SoC specific identifier and information for the shared resources.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ depends on ARCH_QCOM && HAS_IOMEM
+ select PM_OPP
+ select REGMAP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like QCS404.
+
+ This driver populates CPU OPP tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
config QCOM_GENI_SE
tristate "QCOM GENI Serial Engine Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -35,58 +51,48 @@
driver is also used to manage the common aspects of multiple Serial
Engines present in the QUP.
-config QCOM_GLINK_SSR
- tristate "Qualcomm Glink SSR driver"
- depends on RPMSG
- depends on QCOM_RPROC_COMMON
- help
- Say y here to enable GLINK SSR support. The GLINK SSR driver
- implements the SSR protocol for notifying the remote processor about
- neighboring subsystems going up or down.
-
config QCOM_GSBI
- tristate "QCOM General Serial Bus Interface"
- depends on ARCH_QCOM || COMPILE_TEST
- select MFD_SYSCON
- help
- Say y here to enable GSBI support. The GSBI provides control
- functions for connecting the underlying serial UART, SPI, and I2C
- devices to the output pins.
+ tristate "QCOM General Serial Bus Interface"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Say y here to enable GSBI support. The GSBI provides control
+ functions for connecting the underlying serial UART, SPI, and I2C
+ devices to the output pins.
config QCOM_LLCC
tristate "Qualcomm Technologies, Inc. LLCC driver"
depends on ARCH_QCOM || COMPILE_TEST
help
Qualcomm Technologies, Inc. platform specific
- Last Level Cache Controller(LLCC) driver. This provides interfaces
- to clients that use the LLCC. Say yes here to enable LLCC slice
- driver.
+ Last Level Cache Controller (LLCC) driver for platforms such as
+ SDM845. This provides interfaces to clients that use the LLCC.
+ Say yes here to enable the LLCC slice driver.
-config QCOM_SDM845_LLCC
- tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
- depends on QCOM_LLCC
- help
- Say yes here to enable the LLCC driver for SDM845. This provides
- data required to configure LLCC so that clients can start using the
- LLCC slices.
+config QCOM_KRYO_L2_ACCESSORS
+ bool
+ depends on ARCH_QCOM && ARM64 || COMPILE_TEST
config QCOM_MDT_LOADER
tristate
select QCOM_SCM
-config QCOM_PM
- bool "Qualcomm Power Management"
- depends on ARCH_QCOM && !ARM64
- select ARM_CPU_SUSPEND
+config QCOM_OCMEM
+ tristate "Qualcomm On Chip Memory (OCMEM) driver"
+ depends on ARCH_QCOM
select QCOM_SCM
help
- QCOM Platform specific power driver to manage cores and L2 low power
- modes. It interface with various system drivers to put the cores in
- low power modes.
+ The On Chip Memory (OCMEM) allocator allows various clients to
+ allocate memory from OCMEM based on performance, latency and power
+ requirements. This is typically used by the GPU, camera/video, and
+ audio components on some Snapdragon SoCs.
+
+config QCOM_PDR_HELPERS
+ tristate
+ select QCOM_QMI_HELPERS
config QCOM_QMI_HELPERS
tristate
- depends on ARCH_QCOM || COMPILE_TEST
depends on NET
config QCOM_RMTFS_MEM
@@ -103,7 +109,7 @@
config QCOM_RPMH
bool "Qualcomm RPM-Hardened (RPMH) Communication"
- depends on ARCH_QCOM && ARM64 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
@@ -112,7 +118,7 @@
help apply the aggregated state on the resource.
config QCOM_RPMHPD
- bool "Qualcomm RPMh Power domain driver"
+ tristate "Qualcomm RPMh Power domain driver"
depends on QCOM_RPMH && QCOM_COMMAND_DB
help
QCOM RPMh Power domain driver to support power-domains with
@@ -121,8 +127,8 @@
for the voltage rail.
config QCOM_RPMPD
- bool "Qualcomm RPM Power domain driver"
- depends on QCOM_SMD_RPM=y
+ tristate "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM
help
QCOM RPM Power domain driver to support power-domains with
performance states. The driver communicates a performance state
@@ -195,9 +201,11 @@
tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
+ depends on NET
+ select QCOM_PDR_HELPERS
help
- Enable APR IPC protocol support between
- application processor and QDSP6. APR is
- used by audio driver to configure QDSP6
- ASM, ADM and AFE modules.
+ Enable APR IPC protocol support between
+ application processor and QDSP6. APR is
+ used by audio driver to configure QDSP6
+ ASM, ADM and AFE modules.
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 1627887..ad675a6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,10 +3,11 @@
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
-obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
+obj-$(CONFIG_QCOM_CPR) += cpr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
-obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
+obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
@@ -21,7 +22,7 @@
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
-obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
-obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
+obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 4fcc324..f736d20 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
+#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
struct list_head rx_list;
@@ -289,6 +291,9 @@
id->svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);
+ of_property_read_string_index(np, "qcom,protection-domain",
+ 1, &adev->service_path);
+
dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
ret = device_register(&adev->dev);
@@ -300,14 +305,77 @@
return ret;
}
-static void of_register_apr_devices(struct device *dev)
+static int of_apr_add_pd_lookups(struct device *dev)
+{
+ const char *service_name, *service_path;
+ struct apr *apr = dev_get_drvdata(dev);
+ struct device_node *node;
+ struct pdr_service *pds;
+ int ret;
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 0, &service_name);
+ if (ret < 0)
+ continue;
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (ret < 0) {
+ dev_err(dev, "pdr service path missing: %d\n", ret);
+ of_node_put(node);
+ return ret;
+ }
+
+ pds = pdr_add_lookup(apr->pdr, service_name, service_path);
+ if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
+ dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ of_node_put(node);
+ return PTR_ERR(pds);
+ }
+ }
+
+ return 0;
+}
+
+static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
struct apr *apr = dev_get_drvdata(dev);
struct device_node *node;
+ const char *service_path;
+ int ret;
for_each_child_of_node(dev->of_node, node) {
struct apr_device_id id = { {0} };
+ /*
+ * This function is called with svc_path NULL during
+ * apr_probe(), in which case we register any apr devices
+ * without a qcom,protection-domain specified.
+ *
+ * Then as the protection domains become available
+ * (if applicable) this function is again called, but with
+ * svc_path representing the service becoming available. In
+ * this case we register any apr devices with a matching
+ * qcom,protection-domain.
+ */
+
+ ret = of_property_read_string_index(node, "qcom,protection-domain",
+ 1, &service_path);
+ if (svc_path) {
+ /* skip APR services that are PD independent */
+ if (ret)
+ continue;
+
+ /* skip APR services whose PD paths don't match */
+ if (strcmp(service_path, svc_path))
+ continue;
+ } else {
+ /* skip APR services whose PD lookups are registered */
+ if (ret == 0)
+ continue;
+ }
+
if (of_property_read_u32(node, "reg", &id.svc_id))
continue;
@@ -318,6 +386,34 @@
}
}
+static int apr_remove_device(struct device *dev, void *svc_path)
+{
+ struct apr_device *adev = to_apr_device(dev);
+
+ if (svc_path && adev->service_path) {
+ if (!strcmp(adev->service_path, (char *)svc_path))
+ device_unregister(&adev->dev);
+ } else {
+ device_unregister(&adev->dev);
+ }
+
+ return 0;
+}
+
+static void apr_pd_status(int state, char *svc_path, void *priv)
+{
+ struct apr *apr = (struct apr *)priv;
+
+ switch (state) {
+ case SERVREG_SERVICE_STATE_UP:
+ of_register_apr_devices(apr->dev, svc_path);
+ break;
+ case SERVREG_SERVICE_STATE_DOWN:
+ device_for_each_child(apr->dev, svc_path, apr_remove_device);
+ break;
+ }
+}
+
static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
@@ -343,28 +439,39 @@
return -ENOMEM;
}
INIT_WORK(&apr->rx_work, apr_rxwq);
+
+ apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
+ if (IS_ERR(apr->pdr)) {
+ dev_err(dev, "Failed to init PDR handle\n");
+ ret = PTR_ERR(apr->pdr);
+ goto destroy_wq;
+ }
+
INIT_LIST_HEAD(&apr->rx_list);
spin_lock_init(&apr->rx_lock);
spin_lock_init(&apr->svcs_lock);
idr_init(&apr->svcs_idr);
- of_register_apr_devices(dev);
+
+ ret = of_apr_add_pd_lookups(dev);
+ if (ret)
+ goto handle_release;
+
+ of_register_apr_devices(dev, NULL);
return 0;
-}
-static int apr_remove_device(struct device *dev, void *null)
-{
- struct apr_device *adev = to_apr_device(dev);
-
- device_unregister(&adev->dev);
-
- return 0;
+handle_release:
+ pdr_handle_release(apr->pdr);
+destroy_wq:
+ destroy_workqueue(apr->rxwq);
+ return ret;
}
static void apr_remove(struct rpmsg_device *rpdev)
{
struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
flush_workqueue(apr->rxwq);
destroy_workqueue(apr->rxwq);
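The two-string "qcom,protection-domain" property drives both pdr_add_lookup() and the later matching in of_register_apr_devices(). A hedged restatement of that parse; the example strings follow the existing qcom,apr binding style and are illustrative only:

    /*
     * e.g. qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
     * index 0 is the service name, index 1 the service path that is
     * later compared against svc_path.
     */
    static int example_add_pd_lookup(struct apr *apr, struct device_node *node)
    {
            const char *name, *path;
            struct pdr_service *pds;

            if (of_property_read_string_index(node, "qcom,protection-domain",
                                              0, &name) ||
                of_property_read_string_index(node, "qcom,protection-domain",
                                              1, &path))
                    return -EINVAL;

            pds = pdr_add_lookup(apr->pdr, name, path);
            return PTR_ERR_OR_ZERO(pds);
    }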
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index f6c3d17..fc56106 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <soc/qcom/cmd-db.h>
@@ -236,6 +237,77 @@
}
EXPORT_SYMBOL(cmd_db_read_slave_id);
+#ifdef CONFIG_DEBUG_FS
+static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+{
+ int i, j;
+ const struct rsc_hdr *rsc;
+ const struct entry_header *ent;
+ const char *name;
+ u16 len, version;
+ u8 major, minor;
+
+ seq_puts(seq, "Command DB DUMP\n");
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc = &cmd_db_header->header[i];
+ if (!rsc->slv_id)
+ break;
+
+ switch (le16_to_cpu(rsc->slv_id)) {
+ case CMD_DB_HW_ARC:
+ name = "ARC";
+ break;
+ case CMD_DB_HW_VRM:
+ name = "VRM";
+ break;
+ case CMD_DB_HW_BCM:
+ name = "BCM";
+ break;
+ default:
+ name = "Unknown";
+ break;
+ }
+
+ version = le16_to_cpu(rsc->version);
+ major = version >> 8;
+ minor = version;
+
+ seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
+ seq_puts(seq, "-------------------------\n");
+
+ ent = rsc_to_entry_header(rsc);
+ for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
+ seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
+ (int)sizeof(ent->id), ent->id);
+
+ len = le16_to_cpu(ent->len);
+ if (len) {
+ seq_printf(seq, " [%*ph]",
+ len, rsc_offset(rsc, ent));
+ }
+ seq_putc(seq, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmd_db_debugfs_dump, inode->i_private);
+}
+#endif
+
+static const struct file_operations cmd_db_debugfs_ops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = open_cmd_db_debugfs,
+#endif
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int cmd_db_dev_probe(struct platform_device *pdev)
{
struct reserved_mem *rmem;
@@ -259,12 +331,14 @@
return -EINVAL;
}
+ debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
+
return 0;
}
static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
- { },
+ { }
};
static struct platform_driver cmd_db_dev_driver = {
diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
new file mode 100644
index 0000000..6298561
--- /dev/null
+++ b/drivers/soc/qcom/cpr.c
@@ -0,0 +1,1788 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/nvmem-consumer.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define FUSE_REVISION_UNKNOWN (-1)
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_fuse {
+ char *ring_osc;
+ char *init_voltage;
+ char *quotient;
+ char *quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_volt_scale;
+ int max_quot_scale;
+ /* fuse quot */
+ int quot_offset;
+ int quot_scale;
+ int quot_adjust;
+ /* fuse quot_offset */
+ int quot_offset_scale;
+ int quot_offset_adjust;
+};
+
+struct cpr_fuses {
+ int init_voltage_step;
+ int init_voltage_width;
+ struct fuse_corner_data *fuse_corner_data;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ int min_diff_quot;
+ int *step_quot;
+
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+
+ struct cpr_fuses cpr_fuses;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *config;
+ struct reg_sequence *settings;
+ int num_regs_per_fuse;
+};
+
+struct cpr_acc_desc {
+ const struct cpr_desc *cpr_desc;
+ const struct acc_desc *acc_desc;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_corners;
+ unsigned int ref_clk_khz;
+
+ struct generic_pm_domain pd;
+ struct device *dev;
+ struct device *attached_cpu_dev;
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct clk *cpu_clk;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ u32 gcnt;
+ unsigned long flags;
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct cpr_fuse *cpr_fuses;
+
+ struct dentry *debugfs;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ const struct cpr_desc *desc = drv->desc;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
+ ctl, irq);
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f == end)
+ return;
+
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir)
+{
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ return 0;
+}
+
+static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
+ int new_uV, enum voltage_change_dir dir)
+{
+ int ret;
+ struct fuse_corner *fuse_corner = corner->fuse_corner;
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
+ if (ret) {
+ dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
+ new_uV);
+ return ret;
+ }
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
+{
+ return drv->corner ? drv->corner - drv->corners + 1 : 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV, ret;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ if (dir != UP && dir != DOWN)
+ return 0;
+
+ step_uV = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_uV)
+ return -EINVAL;
+
+ corner = drv->corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->up_threshold,
+ desc->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_up_limit)
+ error_steps = desc->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ new_uV = min(new_uV, corner->max_uV);
+
+ dev_dbg(drv->dev,
+ "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ } else {
+ if (desc->clamp_timer_interval &&
+ error_steps < desc->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(desc->down_threshold,
+ desc->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > desc->vdd_apc_step_down_limit)
+ error_steps = desc->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ new_uV = max(new_uV, corner->min_uV);
+
+ dev_dbg(drv->dev,
+ "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
+ new_uV, last_uV, cpr_get_cur_perf_state(drv));
+ }
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret) {
+ cpr_irq_clr_nack(drv);
+ return ret;
+ }
+ drv->corner->last_uV = new_uV;
+
+ if (dir == UP) {
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else {
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = desc->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ }
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ return 0;
+}
+
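Condensing the UP-branch arithmetic above into one hypothetical helper; the 12500 uV step in the comment is an example PMIC step, not a value from this patch:

    /* Not part of the patch: worked form of the UP-branch calculation */
    static int cpr_example_up_target(int last_uV, u32 error_steps, int step_uV,
                                     int max_uV)
    {
            /*
             * e.g. error_steps = 3, step_uV = 12500:
             * last_uV + 3 * 12500 = last_uV + 37500,
             * clamped to the corner ceiling.
             */
            return min(last_uV + (int)error_steps * step_uV, max_uV);
    }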
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ const struct cpr_desc *desc = drv->desc;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ dev_dbg(drv->dev, "CPR is disabled\n");
+ ret = IRQ_NONE;
+ } else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
+ dev_dbg(drv->dev, "CPR measurement is not ready\n");
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ dev_err_ratelimited(drv->dev,
+ "Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ ret = IRQ_NONE;
+ } else {
+ /*
+ * The handling order below follows each IRQ's priority
+ */
+ if (val & CPR_INT_UP) {
+ cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
+ } else {
+ dev_dbg(drv->dev,
+ "IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ mutex_lock(&drv->lock);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ }
+
+ mutex_unlock(&drv->lock);
+
+ return regulator_disable(drv->vdd_apc);
+}
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+ const struct cpr_desc *desc = drv->desc;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW ceiling, floor and vlevel */
+ val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /*
+ * Clear the target quotient value and gate count of all
+ * ring oscillators
+ */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
+ desc->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
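Both time-to-cycles conversions above share the same form. As a hypothetical check with a 19.2 MHz reference clock: gcnt_us = 1 gives (19200 * 1) / 1000 = 19 gate-count cycles, and timer_delay_us = 5000 programs (19200 * 5000) / 1000 = 96000 cycles into REG_RBCPR_TIMER_INTERVAL.

    /* Not part of the patch: the shared microseconds-to-cycles form */
    static u32 cpr_example_us_to_cycles(u32 ref_clk_khz, u32 us)
    {
            return (ref_clk_khz * us) / 1000;  /* 19200 kHz, 1 us -> 19 */
    }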
+static int cpr_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
+ __func__, state, cpr_get_cur_perf_state(drv));
+
+ /*
+ * Determine new corner we're going to.
+ * Remove one since lowest performance state is 1.
+ */
+ corner = drv->corners + state - 1;
+ end = &drv->corners[drv->num_corners - 1];
+ if (corner > end || corner < drv->corners) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Determine direction */
+ if (drv->corner > corner)
+ dir = DOWN;
+ else if (drv->corner < corner)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ if (cpr_is_allowed(drv))
+ new_uV = corner->last_uV;
+ else
+ new_uV = corner->uV;
+
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_scale_voltage(drv, corner, new_uV, dir);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv)) {
+ cpr_irq_clr(drv);
+ if (drv->corner != corner)
+ cpr_corner_restore(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
+{
+ struct nvmem_cell *cell;
+ ssize_t len;
+ char *ret;
+ int i;
+
+ *data = 0;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) != -EPROBE_DEFER)
+ dev_err(dev, "undefined cell %s\n", cname);
+ return PTR_ERR(cell);
+ }
+
+ ret = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(ret)) {
+ dev_err(dev, "can't read cell %s\n", cname);
+ return PTR_ERR(ret);
+ }
+
+ for (i = 0; i < len; i++)
+ *data |= ret[i] << (8 * i);
+
+ kfree(ret);
+ dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
+
+ return 0;
+}
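+
+/*
+ * Illustrative note: cpr_read_efuse() assembles the nvmem cell bytes
+ * little-endian, e.g. a two-byte cell containing { 0x34, 0x12 } yields
+ * *data == 0x1234.
+ */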
+
+static int
+cpr_populate_ring_osc_idx(struct cpr_drv *drv)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ u32 data;
+ int ret;
+
+ for (; fuse < end; fuse++, fuses++) {
+ ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
+ &data);
+ if (ret)
+ return ret;
+ fuse->ring_osc_idx = data;
+ }
+
+ return 0;
+}
+
+static int cpr_read_fuse_uV(const struct cpr_desc *desc,
+ const struct fuse_corner_data *fdata,
+ const char *init_v_efuse,
+ int step_volt,
+ struct cpr_drv *drv)
+{
+ int step_size_uV, steps, uV;
+ u32 bits = 0;
+ int ret;
+
+ ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
+ if (ret)
+ return ret;
+
+ steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
+ /* Not two's complement; instead, the highest bit is the sign bit */
+ if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
+ steps = -steps;
+
+ step_size_uV = desc->cpr_fuses.init_voltage_step;
+
+ uV = fdata->ref_uV + steps * step_size_uV;
+ return DIV_ROUND_UP(uV, step_volt) * step_volt;
+}
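+
+/*
+ * Worked example (illustrative, using the qcs404 values below:
+ * init_voltage_width = 6, init_voltage_step = 8000 uV): a raw fuse
+ * value of 0b100011 has the sign bit (bit 5) set and a magnitude of 3,
+ * so steps = -3 and uV = ref_uV - 3 * 8000, before rounding up to the
+ * regulator's step size.
+ */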
+
+static int cpr_fuse_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int i;
+ unsigned int step_volt;
+ struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end;
+ int uV;
+ const struct reg_sequence *accs;
+ int ret;
+
+ accs = acc_desc->settings;
+
+ step_volt = regulator_get_linear_step(drv->vdd_apc);
+ if (!step_volt)
+ return -EINVAL;
+
+ /* Populate fuse_corner members */
+ fuse = drv->fuse_corners;
+ end = &fuse[desc->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+
+ for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
+ /*
+ * Update SoC voltages: platforms might choose a different
+ * regulator than the one used to characterize the algorithms
+ * (i.e., init_voltage_step).
+ */
+ fdata->min_uV = roundup(fdata->min_uV, step_volt);
+ fdata->max_uV = roundup(fdata->max_uV, step_volt);
+
+ /* Populate uV */
+ uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
+ step_volt, drv);
+ if (uV < 0)
+ return uV;
+
+ fuse->min_uV = fdata->min_uV;
+ fuse->max_uV = fdata->max_uV;
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Populate target quotient by scaling */
+ ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
+ if (ret)
+ return ret;
+
+ fuse->quot *= fdata->quot_scale;
+ fuse->quot += fdata->quot_offset;
+ fuse->quot += fdata->quot_adjust;
+ fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
+
+ /* Populate acc settings */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->min_uV,
+ fuse->min_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "min uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->min_uV, i);
+ return -EINVAL;
+ }
+
+ ret = regulator_is_supported_voltage(drv->vdd_apc,
+ fuse->max_uV,
+ fuse->max_uV);
+ if (!ret) {
+ dev_err(drv->dev,
+ "max uV: %d (fuse corner: %d) not supported by regulator\n",
+ fuse->max_uV, i);
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev,
+ "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
+ i, fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
+ }
+
+ return 0;
+}
+
+static int cpr_calculate_scaling(const char *quot_offset,
+ struct cpr_drv *drv,
+ const struct fuse_corner_data *fdata,
+ const struct corner *corner)
+{
+ u32 quot_diff = 0;
+ unsigned long freq_diff;
+ int scaling;
+ const struct fuse_corner *fuse, *prev_fuse;
+ int ret;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ if (quot_offset) {
+ ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
+ if (ret)
+ return ret;
+
+ quot_diff *= fdata->quot_offset_scale;
+ quot_diff += fdata->quot_offset_adjust;
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ return min(scaling, fdata->max_quot_scale);
+}
+
+static int cpr_interpolate(const struct corner *corner, int step_volt,
+ const struct fuse_corner_data *fdata)
+{
+ unsigned long f_high, f_low, f_diff;
+ int uV_high, uV_low, uV;
+ u64 temp, temp_limit;
+ const struct fuse_corner *fuse, *prev_fuse;
+
+ fuse = corner->fuse_corner;
+ prev_fuse = fuse - 1;
+
+ f_high = fuse->max_freq;
+ f_low = prev_fuse->max_freq;
+ uV_high = fuse->uV;
+ uV_low = prev_fuse->uV;
+ f_diff = fuse->max_freq - corner->freq;
+
+ /*
+ * Don't interpolate in the wrong direction. This could happen
+ * if the adjusted fuse voltage overlaps with the previous fuse's
+ * adjusted voltage.
+ */
+ if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
+ return corner->uV;
+
+ temp = f_diff * (uV_high - uV_low);
+ temp = div64_ul(temp, f_high - f_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert Hz to MHz.
+ */
+ temp_limit = f_diff * fdata->max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ uV = uV_high - min(temp, temp_limit);
+ return roundup(uV, step_volt);
+}
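+
+/*
+ * Worked example for cpr_interpolate() (illustrative): with the previous
+ * fuse corner at 1.0 GHz / 1100000 uV and the current one at
+ * 1.4 GHz / 1300000 uV, a corner at 1.2 GHz interpolates to
+ * 1300000 - (200 MHz * 200000 uV / 400 MHz) = 1200000 uV, unless the
+ * f_diff * max_volt_scale limit clamps the subtracted amount first.
+ */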
+
+static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
+{
+ struct device_node *np;
+ unsigned int fuse_corner = 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+ if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
+ pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
+ __func__);
+
+ of_node_put(np);
+
+ return fuse_corner;
+}
+
+static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
+ struct device *cpu_dev)
+{
+ u64 rate = 0;
+ struct device_node *ref_np;
+ struct device_node *desc_np;
+ struct device_node *child_np = NULL;
+ struct device_node *child_req_np = NULL;
+
+ desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+ if (!desc_np)
+ return 0;
+
+ ref_np = dev_pm_opp_get_of_node(ref);
+ if (!ref_np)
+ goto out_ref;
+
+ do {
+ of_node_put(child_req_np);
+ child_np = of_get_next_available_child(desc_np, child_np);
+ child_req_np = of_parse_phandle(child_np, "required-opps", 0);
+ } while (child_np && child_req_np != ref_np);
+
+ if (child_np && child_req_np == ref_np)
+ of_property_read_u64(child_np, "opp-hz", &rate);
+
+ of_node_put(child_req_np);
+ of_node_put(child_np);
+ of_node_put(ref_np);
+out_ref:
+ of_node_put(desc_np);
+
+ return (unsigned long) rate;
+}
+
+static int cpr_corner_init(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ const struct cpr_fuse *fuses = drv->cpr_fuses;
+ int i, level, scaling = 0;
+ unsigned int fnum, fc;
+ const char *quot_offset;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ struct corner_data *cdata;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling;
+ unsigned long freq_diff, freq_diff_mhz;
+ unsigned long freq;
+ int step_volt = regulator_get_linear_step(drv->vdd_apc);
+ struct dev_pm_opp *opp;
+
+ if (!step_volt)
+ return -EINVAL;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+
+ cdata = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(struct corner_data),
+ GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (level = 1; level <= drv->num_corners; level++) {
+ opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
+ if (IS_ERR(opp))
+ return -EINVAL;
+ fc = cpr_get_fuse_corner(opp);
+ if (!fc) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ fnum = fc - 1;
+ freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
+ if (!freq) {
+ dev_pm_opp_put(opp);
+ return -EINVAL;
+ }
+ cdata[level - 1].fuse_corner = fnum;
+ cdata[level - 1].freq = freq;
+
+ fuse = &drv->fuse_corners[fnum];
+ dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
+ freq, dev_pm_opp_get_level(opp) - 1, fnum);
+ if (freq > fuse->max_freq)
+ fuse->max_freq = freq;
+ dev_pm_opp_put(opp);
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
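+ /*
+ * Worked example (illustrative): if QUOT grows by 96 between two
+ * fuse corners whose max frequencies are 400 MHz apart, then
+ * scaling = 1000 * 96 / 400 = 240, and a corner running 100 MHz
+ * below its fuse corner's max frequency gets
+ * quot_adjust = 100 * 240 / 1000 = 24 subtracted from the fused
+ * quotient target.
+ */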
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ fnum = cdata[i].fuse_corner;
+ fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
+ quot_offset = fuses[fnum].quotient_offset;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->freq = cdata[i].freq;
+ corner->uV = fuse->uV;
+
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ scaling = cpr_calculate_scaling(quot_offset, drv,
+ fdata, corner);
+ if (scaling < 0)
+ return scaling;
+
+ apply_scaling = true;
+ } else if (corner->freq == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - corner->freq;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ corner->uV = cpr_interpolate(corner, step_volt, fdata);
+ }
+
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+
+ return 0;
+}
+
+static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct cpr_fuse *fuses;
+ int i;
+
+ fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
+ sizeof(struct cpr_fuse),
+ GFP_KERNEL);
+ if (!fuses)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < desc->num_fuse_corners; i++) {
+ char tbuf[32];
+
+ snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
+ fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].ring_osc)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
+ fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].init_voltage)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
+ fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
+ if (!fuses[i].quotient)
+ return ERR_PTR(-ENOMEM);
+
+ snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
+ fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
+ GFP_KERNEL);
+ if (!fuses[i].quotient_offset)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return fuses;
+}
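+
+/*
+ * Note: the cell names built above ("cpr_ring_osc1", "cpr_init_voltage1",
+ * "cpr_quotient1", "cpr_quotient_offset1", ...) are looked up through the
+ * nvmem-cell-names property of the CPR device tree node, one set of cells
+ * per fuse corner.
+ */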
+
+static void cpr_set_loop_allowed(struct cpr_drv *drv)
+{
+ drv->loop_disabled = false;
+}
+
+static int cpr_init_parameters(struct cpr_drv *drv)
+{
+ const struct cpr_desc *desc = drv->desc;
+ struct clk *clk;
+
+ clk = clk_get(drv->dev, "ref");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ drv->ref_clk_khz = clk_get_rate(clk) / 1000;
+ clk_put(clk);
+
+ if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
+ desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
+ desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
+ desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
+ desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
+ desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ return -EINVAL;
+
+ dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
+ desc->up_threshold, desc->down_threshold);
+
+ return 0;
+}
+
+static int cpr_find_initial_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ const struct corner *end;
+ struct corner *iter;
+ unsigned int i = 0;
+
+ if (!drv->cpu_clk) {
+ dev_err(drv->dev, "cannot get rate from NULL clk\n");
+ return -EINVAL;
+ }
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ /*
+ * Some bootloaders set a CPU clock frequency that is not defined
+ * in the OPP table. When running at an unlisted frequency,
+ * cpufreq_online() will change to the OPP which has the lowest
+ * frequency, at or above the unlisted frequency.
+ * Since cpufreq_online() always "rounds up" in the case of an
+ * unlisted frequency, this function always "rounds down" in the case
+ * of an unlisted frequency. That way, when cpufreq_online()
+ * triggers the first ever call to cpr_set_performance_state(),
+ * it will correctly determine the direction as UP.
+ */
+ for (iter = drv->corners; iter <= end; iter++) {
+ if (iter->freq > rate)
+ break;
+ i++;
+ if (iter->freq == rate) {
+ drv->corner = iter;
+ break;
+ }
+ if (iter->freq < rate)
+ drv->corner = iter;
+ }
+
+ if (!drv->corner) {
+ dev_err(drv->dev, "boot up corner not found\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(drv->dev, "boot up perf state: %u\n", i);
+
+ return 0;
+}
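+
+/*
+ * Illustrative example: with corners at 500/800/1000 MHz and a
+ * bootloader-programmed rate of 900 MHz, the loop above settles on the
+ * 800 MHz corner, so the first cpr_set_performance_state() call
+ * targeting the 1000 MHz corner is correctly seen as a move UP.
+ */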
+
+static const struct cpr_desc qcs404_cpr_desc = {
+ .num_fuse_corners = 3,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 25, 25, 25, },
+ .timer_delay_us = 5000,
+ .timer_cons_up = 0,
+ .timer_cons_down = 2,
+ .up_threshold = 1,
+ .down_threshold = 3,
+ .idle_clocks = 15,
+ .gcnt_us = 1,
+ .vdd_apc_step_up_limit = 1,
+ .vdd_apc_step_down_limit = 1,
+ .cpr_fuses = {
+ .init_voltage_step = 8000,
+ .init_voltage_width = 6,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* fuse corner 0 */
+ {
+ .ref_uV = 1224000,
+ .max_uV = 1224000,
+ .min_uV = 1048000,
+ .max_volt_scale = 0,
+ .max_quot_scale = 0,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 1 */
+ {
+ .ref_uV = 1288000,
+ .max_uV = 1288000,
+ .min_uV = 1048000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = -20,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ /* fuse corner 2 */
+ {
+ .ref_uV = 1352000,
+ .max_uV = 1384000,
+ .min_uV = 1088000,
+ .max_volt_scale = 2000,
+ .max_quot_scale = 1400,
+ .quot_offset = 0,
+ .quot_scale = 1,
+ .quot_adjust = 0,
+ .quot_offset_scale = 5,
+ .quot_offset_adjust = 0,
+ },
+ },
+ },
+};
+
+static const struct acc_desc qcs404_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xb120, 0x1041040 },
+ { 0xb124, 0x41 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ { 0xb120, 0x0 },
+ { 0xb124, 0x0 },
+ },
+ .config = (struct reg_sequence[]){
+ { 0xb138, 0xff },
+ { 0xb130, 0x5555 },
+ },
+ .num_regs_per_fuse = 2,
+};
+
+static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
+ .cpr_desc = &qcs404_cpr_desc,
+ .acc_desc = &qcs404_acc_desc,
+};
+
+static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int cpr_power_off(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_disable(drv);
+}
+
+static int cpr_power_on(struct generic_pm_domain *domain)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+
+ return cpr_enable(drv);
+}
+
+static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
+ const struct acc_desc *acc_desc = drv->acc_desc;
+ int ret = 0;
+
+ mutex_lock(&drv->lock);
+
+ dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
+
+ /*
+ * This driver only supports scaling voltage for a CPU cluster
+ * where all CPUs in the cluster share a single regulator.
+ * Therefore, save the struct device pointer only for the first
+ * CPU device that gets attached. There is no need to do any
+ * additional initialization when further CPUs get attached.
+ */
+ if (drv->attached_cpu_dev)
+ goto unlock;
+
+ /*
+ * cpr_scale_voltage() requires the direction (if we are changing
+ * to a higher or lower OPP). The first time
+ * cpr_set_performance_state() is called, there is no previous
+ * performance state defined. Therefore, we call
+ * cpr_find_initial_corner() that gets the CPU clock frequency
+ * set by the bootloader, so that we can determine the direction
+ * the first time cpr_set_performance_state() is called.
+ */
+ drv->cpu_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(drv->cpu_clk)) {
+ ret = PTR_ERR(drv->cpu_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
+ goto unlock;
+ }
+ drv->attached_cpu_dev = dev;
+
+ dev_dbg(drv->dev, "using cpu clk from: %s\n",
+ dev_name(drv->attached_cpu_dev));
+
+ /*
+ * Everything related to (virtual) corners has to be initialized
+ * here, when attaching to the power domain, since the maximum
+ * frequency of each fuse corner is needed, and that only becomes
+ * known once the cpufreq driver has attached to us.
+ */
+ ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
+ if (ret < 0) {
+ dev_err(drv->dev, "could not get OPP count\n");
+ goto unlock;
+ }
+ drv->num_corners = ret;
+
+ if (drv->num_corners < 2) {
+ dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
+ sizeof(*drv->corners),
+ GFP_KERNEL);
+ if (!drv->corners) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = cpr_corner_init(drv);
+ if (ret)
+ goto unlock;
+
+ cpr_set_loop_allowed(drv);
+
+ ret = cpr_init_parameters(drv);
+ if (ret)
+ goto unlock;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ goto unlock;
+
+ ret = cpr_find_initial_corner(drv);
+ if (ret)
+ goto unlock;
+
+ if (acc_desc->config)
+ regmap_multi_reg_write(drv->tcsr, acc_desc->config,
+ acc_desc->num_regs_per_fuse);
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+ dev_info(drv->dev, "driver initialized with %u OPPs\n",
+ drv->num_corners);
+
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static int cpr_debug_info_show(struct seq_file *s, void *unused)
+{
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ struct cpr_drv *drv = s->private;
+ struct fuse_corner *fuse_corner;
+ struct corner *corner;
+
+ corner = drv->corner;
+ fuse_corner = corner->fuse_corner;
+
+ seq_printf(s, "corner, current_volt = %d uV\n",
+ corner->last_uV);
+
+ ro_sel = fuse_corner->ring_osc_idx;
+ gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
+ seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
+
+ ctl = cpr_read(drv, REG_RBCPR_CTL);
+ seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
+
+ irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
+
+ reg = cpr_read(drv, REG_RBCPR_RESULT_0);
+ seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ seq_printf(s, " [step_dn = %u", step_dn);
+
+ seq_printf(s, ", step_up = %u", step_up);
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ seq_printf(s, ", error_steps = %u", error_steps);
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ seq_printf(s, ", error = %u", error);
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ seq_printf(s, ", error_lt_0 = %u", error_lt0);
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ seq_printf(s, ", busy = %u]\n", busy);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
+
+static void cpr_debugfs_init(struct cpr_drv *drv)
+{
+ drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
+
+ debugfs_create_file("debug_info", 0444, drv->debugfs,
+ drv, &cpr_debug_info_fops);
+}
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ int irq, ret;
+ const struct cpr_acc_desc *data;
+ struct device_node *np;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+
+ data = of_device_get_match_data(dev);
+ if (!data || !data->cpr_desc || !data->acc_desc)
+ return -EINVAL;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+ drv->dev = dev;
+ drv->desc = data->cpr_desc;
+ drv->acc_desc = data->acc_desc;
+
+ drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
+ sizeof(*drv->fuse_corners),
+ GFP_KERNEL);
+ if (!drv->fuse_corners)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ drv->tcsr = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+
+ /*
+ * Initialize fuse corners here, since they simply depend
+ * on data in efuses.
+ * Everything related to (virtual) corners has to be
+ * initialized after attaching to the power domain,
+ * since it depends on the CPU's OPP table.
+ */
+ ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
+ if (ret)
+ return ret;
+
+ drv->cpr_fuses = cpr_get_fuses(drv);
+ if (IS_ERR(drv->cpr_fuses))
+ return PTR_ERR(drv->cpr_fuses);
+
+ ret = cpr_populate_ring_osc_idx(drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_fuse_corner_init(drv);
+ if (ret)
+ return ret;
+
+ mutex_init(&drv->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
+ GFP_KERNEL);
+ if (!drv->pd.name)
+ return -EINVAL;
+
+ drv->pd.power_off = cpr_power_off;
+ drv->pd.power_on = cpr_power_on;
+ drv->pd.set_performance_state = cpr_set_performance_state;
+ drv->pd.opp_to_performance_state = cpr_get_performance_state;
+ drv->pd.attach_dev = cpr_pd_attach_dev;
+
+ ret = pm_genpd_init(&drv->pd, NULL, true);
+ if (ret)
+ return ret;
+
+ ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drv);
+ cpr_debugfs_init(drv);
+
+ return 0;
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&drv->pd);
+
+ debugfs_remove_recursive(drv->debugfs);
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
deleted file mode 100644
index d7babe3..0000000
--- a/drivers/soc/qcom/glink_ssr.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- * Copyright (c) 2017, Linaro Ltd.
- */
-
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/rpmsg.h>
-#include <linux/remoteproc/qcom_rproc.h>
-
-/**
- * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
- * version: The G-Link SSR protocol version
- * command: The G-Link SSR command - do_cleanup
- * seq_num: Sequence number
- * name_len: Length of the name of the subsystem being restarted
- * name: G-Link edge name of the subsystem being restarted
- */
-struct do_cleanup_msg {
- __le32 version;
- __le32 command;
- __le32 seq_num;
- __le32 name_len;
- char name[32];
-};
-
-/**
- * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
- * version: The G-Link SSR protocol version
- * response: The G-Link SSR response to a do_cleanup command, cleanup_done
- * seq_num: Sequence number
- */
-struct cleanup_done_msg {
- __le32 version;
- __le32 response;
- __le32 seq_num;
-};
-
-/**
- * G-Link SSR protocol commands
- */
-#define GLINK_SSR_DO_CLEANUP 0
-#define GLINK_SSR_CLEANUP_DONE 1
-
-struct glink_ssr {
- struct device *dev;
- struct rpmsg_endpoint *ept;
-
- struct notifier_block nb;
-
- u32 seq_num;
- struct completion completion;
-};
-
-static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
- void *data, int len, void *priv, u32 addr)
-{
- struct cleanup_done_msg *msg = data;
- struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
-
- if (len < sizeof(*msg)) {
- dev_err(ssr->dev, "message too short\n");
- return -EINVAL;
- }
-
- if (le32_to_cpu(msg->version) != 0)
- return -EINVAL;
-
- if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
- return 0;
-
- if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
- dev_err(ssr->dev, "invalid sequence number of response\n");
- return -EINVAL;
- }
-
- complete(&ssr->completion);
-
- return 0;
-}
-
-static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
- struct do_cleanup_msg msg;
- char *ssr_name = data;
- int ret;
-
- ssr->seq_num++;
- reinit_completion(&ssr->completion);
-
- memset(&msg, 0, sizeof(msg));
- msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
- msg.seq_num = cpu_to_le32(ssr->seq_num);
- msg.name_len = cpu_to_le32(strlen(ssr_name));
- strlcpy(msg.name, ssr_name, sizeof(msg.name));
-
- ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
- if (ret < 0)
- dev_err(ssr->dev, "failed to send cleanup message\n");
-
- ret = wait_for_completion_timeout(&ssr->completion, HZ);
- if (!ret)
- dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
-
- return NOTIFY_DONE;
-}
-
-static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
-{
- struct glink_ssr *ssr;
-
- ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
- if (!ssr)
- return -ENOMEM;
-
- init_completion(&ssr->completion);
-
- ssr->dev = &rpdev->dev;
- ssr->ept = rpdev->ept;
- ssr->nb.notifier_call = qcom_glink_ssr_notify;
-
- dev_set_drvdata(&rpdev->dev, ssr);
-
- return qcom_register_ssr_notifier(&ssr->nb);
-}
-
-static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
-{
- struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
-
- qcom_unregister_ssr_notifier(&ssr->nb);
-}
-
-static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
- { "glink_ssr" },
- {}
-};
-
-static struct rpmsg_driver qcom_glink_ssr_driver = {
- .probe = qcom_glink_ssr_probe,
- .remove = qcom_glink_ssr_remove,
- .callback = qcom_glink_ssr_callback,
- .id_table = qcom_glink_ssr_match,
- .drv = {
- .name = "qcom_glink_ssr",
- },
-};
-module_rpmsg_driver(qcom_glink_ssr_driver);
-
-MODULE_ALIAS("rpmsg:glink_ssr");
-MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
new file mode 100644
index 0000000..c20cb92
--- /dev/null
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+#include <soc/qcom/kryo-l2-accessors.h>
+
+#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6)
+#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7)
+
+static DEFINE_RAW_SPINLOCK(l2_access_lock);
+
+/**
+ * kryo_l2_set_indirect_reg() - write value to an L2 register
+ * @reg: Address of L2 register.
+ * @val: Value to be written to the register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and between system register accesses and device memory.
+ */
+void kryo_l2_set_indirect_reg(u64 reg, u64 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ write_sysreg_s(val, L2CPUSRDR_EL1);
+ isb();
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+}
+EXPORT_SYMBOL(kryo_l2_set_indirect_reg);
+
+/**
+ * kryo_l2_get_indirect_reg() - read an L2 register value
+ * @reg: Address of L2 register.
+ *
+ * Use architecturally required barriers for ordering between system register
+ * accesses, and between system register accesses and device memory.
+ */
+u64 kryo_l2_get_indirect_reg(u64 reg)
+{
+ u64 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&l2_access_lock, flags);
+ write_sysreg_s(reg, L2CPUSRSELR_EL1);
+ isb();
+ val = read_sysreg_s(L2CPUSRDR_EL1);
+ raw_spin_unlock_irqrestore(&l2_access_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(kryo_l2_get_indirect_reg);
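+
+/*
+ * Usage sketch (illustrative; the selector value 0x351 is hypothetical):
+ *
+ *   u64 val = kryo_l2_get_indirect_reg(0x351);
+ *
+ *   kryo_l2_set_indirect_reg(0x351, val | BIT(0));
+ *
+ * The raw spinlock above keeps the SELR/SRDR register pairs of
+ * concurrent callers from interleaving.
+ */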
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-qcom.c
similarity index 68%
rename from drivers/soc/qcom/llcc-slice.c
rename to drivers/soc/qcom/llcc-qcom.c
index 4a61116..70fbe70 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
@@ -46,15 +47,90 @@
#define BANK_OFFSET_STRIDE 0x80000
-static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
-
-static struct regmap_config llcc_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
+/**
+ * struct llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: Unique id for the client's use case
+ * @slice_id: llcc slice id for each client
+ * @max_cap: The maximum capacity of the cache slice provided in KB
+ * @priority: Priority of the client used to select victim line for replacement
+ * @fixed_size: Boolean indicating if the slice has a fixed capacity
+ * @bonus_ways: Bonus ways are additional ways to be used for any slice,
+ * if the client ends up using more than its reserved cache ways.
+ * Bonus ways are allocated only if they are not reserved for
+ * some other client.
+ * @res_ways: Reserved ways for the cache slice; the reserved ways cannot
+ * be used by any client other than the one they are assigned to.
+ * @cache_mode: Each slice operates as a cache; this controls the mode of the
+ * slice: normal or TCM (Tightly Coupled Memory)
+ * @probe_target_ways: Determines what ways to probe for access hit. When
+ * configured to 1 only bonus and reserved ways are probed.
+ * When configured to 0 all ways in llcc are probed.
+ * @dis_cap_alloc: Disable capacity based allocation for a client
+ * @retain_on_pc: If this bit is set and the client has maintained an active
+ * vote, then the ways assigned to this client are not flushed
+ * on power collapse.
+ * @activate_on_init: Activate the slice immediately after it is programmed
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+};
+
+struct qcom_llcc_config {
+ const struct llcc_slice_config *sct_data;
+ int size;
+};
+
+static const struct llcc_slice_config sc7180_data[] = {
+ { LLCC_CPUSS, 1, 256, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 128, 1, 0, 0xf, 0x0, 0, 0, 0, 1, 0 },
+};
+
+static const struct llcc_slice_config sdm845_data[] = {
+ { LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0 },
+ { LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0 },
+ { LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1 },
+ { LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0 },
+ { LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
+};
+
+static const struct qcom_llcc_config sc7180_cfg = {
+ .sct_data = sc7180_data,
+ .size = ARRAY_SIZE(sc7180_data),
+};
+
+static const struct qcom_llcc_config sdm845_cfg = {
+ .sct_data = sdm845_data,
+ .size = ARRAY_SIZE(sdm845_data),
+};
+
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
+
/**
* llcc_slice_getd - get llcc slice descriptor
* @uid: usecase_id for the client
@@ -301,25 +377,25 @@
return ret;
}
-int qcom_llcc_remove(struct platform_device *pdev)
+static int qcom_llcc_remove(struct platform_device *pdev)
{
/* Set the global pointer to an error code to avoid referencing it */
drv_data = ERR_PTR(-ENODEV);
return 0;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_remove);
static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
const char *name)
{
- struct resource *res;
void __iomem *base;
+ struct regmap_config llcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ };
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, name);
if (IS_ERR(base))
return ERR_CAST(base);
@@ -327,13 +403,15 @@
return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
}
-int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *llcc_cfg, u32 sz)
+static int qcom_llcc_probe(struct platform_device *pdev)
{
u32 num_banks;
struct device *dev = &pdev->dev;
int ret, i;
struct platform_device *llcc_edac;
+ const struct qcom_llcc_config *cfg;
+ const struct llcc_slice_config *llcc_cfg;
+ u32 sz;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
@@ -363,6 +441,10 @@
num_banks >>= LLCC_LB_CNT_SHIFT;
drv_data->num_banks = num_banks;
+ cfg = of_device_get_match_data(&pdev->dev);
+ llcc_cfg = cfg->sct_data;
+ sz = cfg->size;
+
for (i = 0; i < sz; i++)
if (llcc_cfg[i].slice_id > drv_data->max_slices)
drv_data->max_slices = llcc_cfg[i].slice_id;
@@ -408,6 +490,22 @@
drv_data = ERR_PTR(-ENODEV);
return ret;
}
-EXPORT_SYMBOL_GPL(qcom_llcc_probe);
-MODULE_LICENSE("GPL v2");
+
+static const struct of_device_id qcom_llcc_of_match[] = {
+ { .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
+ { .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { }
+};
+
+static struct platform_driver qcom_llcc_driver = {
+ .driver = {
+ .name = "qcom-llcc",
+ .of_match_table = qcom_llcc_of_match,
+ },
+ .probe = qcom_llcc_probe,
+ .remove = qcom_llcc_remove,
+};
+module_platform_driver(qcom_llcc_driver);
+
MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
deleted file mode 100644
index 86600d9..0000000
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/soc/qcom/llcc-qcom.h>
-
-/*
- * SCT(System Cache Table) entry contains of the following members:
- * usecase_id: Unique id for the client's use case
- * slice_id: llcc slice id for each client
- * max_cap: The maximum capacity of the cache slice provided in KB
- * priority: Priority of the client used to select victim line for replacement
- * fixed_size: Boolean indicating if the slice has a fixed capacity
- * bonus_ways: Bonus ways are additional ways to be used for any slice,
- * if client ends up using more than reserved cache ways. Bonus
- * ways are allocated only if they are not reserved for some
- * other client.
- * res_ways: Reserved ways for the cache slice, the reserved ways cannot
- * be used by any other client than the one its assigned to.
- * cache_mode: Each slice operates as a cache, this controls the mode of the
- * slice: normal or TCM(Tightly Coupled Memory)
- * probe_target_ways: Determines what ways to probe for access hit. When
- * configured to 1 only bonus and reserved ways are probed.
- * When configured to 0 all ways in llcc are probed.
- * dis_cap_alloc: Disable capacity based allocation for a client
- * retain_on_pc: If this bit is set and client has maintained active vote
- * then the ways assigned to this client are not flushed on power
- * collapse.
- * activate_on_init: Activate the slice immediately after the SCT is programmed
- */
-#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
- { \
- .usecase_id = uid, \
- .slice_id = sid, \
- .max_cap = mc, \
- .priority = p, \
- .fixed_size = fs, \
- .bonus_ways = bway, \
- .res_ways = rway, \
- .cache_mode = cmod, \
- .probe_target_ways = ptw, \
- .dis_cap_alloc = dca, \
- .retain_on_pc = rp, \
- .activate_on_init = a, \
- }
-
-static struct llcc_slice_config sdm845_data[] = {
- SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
- SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
- SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
- SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
-};
-
-static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
-{
- return qcom_llcc_remove(pdev);
-}
-
-static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
-{
- return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
-}
-
-static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
- { .compatible = "qcom,sdm845-llcc", },
- { }
-};
-
-static struct platform_driver sdm845_qcom_llcc_driver = {
- .driver = {
- .name = "sdm845-llcc",
- .of_match_table = sdm845_qcom_llcc_of_match,
- },
- .probe = sdm845_qcom_llcc_probe,
- .remove = sdm845_qcom_llcc_remove,
-};
-module_platform_driver(sdm845_qcom_llcc_driver);
-
-MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index eba7f76..6034cd8 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -98,7 +98,7 @@
if (ehdr->e_phnum < 2)
return ERR_PTR(-EINVAL);
- if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD)
+ if (phdrs[0].p_type == PT_LOAD)
return ERR_PTR(-EINVAL);
if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
new file mode 100644
index 0000000..f1875dc
--- /dev/null
+++ b/drivers/soc/qcom/ocmem.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/ocmem.h>
+
+enum region_mode {
+ WIDE_MODE = 0x0,
+ THIN_MODE,
+ MODE_DEFAULT = WIDE_MODE,
+};
+
+enum ocmem_macro_state {
+ PASSTHROUGH = 0,
+ PERI_ON = 1,
+ CORE_ON = 2,
+ CLK_OFF = 4,
+};
+
+struct ocmem_region {
+ bool interleaved;
+ enum region_mode mode;
+ unsigned int num_macros;
+ enum ocmem_macro_state macro_state[4];
+ unsigned long macro_size;
+ unsigned long region_size;
+};
+
+struct ocmem_config {
+ uint8_t num_regions;
+ unsigned long macro_size;
+};
+
+struct ocmem {
+ struct device *dev;
+ const struct ocmem_config *config;
+ struct resource *memory;
+ void __iomem *mmio;
+ unsigned int num_ports;
+ unsigned int num_macros;
+ bool interleaved;
+ struct ocmem_region *regions;
+ unsigned long active_allocations;
+};
+
+#define OCMEM_MIN_ALIGN SZ_64K
+#define OCMEM_MIN_ALLOC SZ_64K
+
+#define OCMEM_REG_HW_VERSION 0x00000000
+#define OCMEM_REG_HW_PROFILE 0x00000004
+
+#define OCMEM_REG_REGION_MODE_CTL 0x00001000
+#define OCMEM_REGION_MODE_CTL_REG0_THIN 0x00000001
+#define OCMEM_REGION_MODE_CTL_REG1_THIN 0x00000002
+#define OCMEM_REGION_MODE_CTL_REG2_THIN 0x00000004
+#define OCMEM_REGION_MODE_CTL_REG3_THIN 0x00000008
+
+#define OCMEM_REG_GFX_MPU_START 0x00001004
+#define OCMEM_REG_GFX_MPU_END 0x00001008
+
+#define OCMEM_HW_PROFILE_NUM_PORTS(val) FIELD_PREP(0x0000000f, (val))
+#define OCMEM_HW_PROFILE_NUM_MACROS(val) FIELD_PREP(0x00003f00, (val))
+
+#define OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE 0x00010000
+#define OCMEM_HW_PROFILE_INTERLEAVING 0x00020000
+#define OCMEM_REG_GEN_STATUS 0x0000000c
+
+#define OCMEM_REG_PSGSC_STATUS 0x00000038
+#define OCMEM_REG_PSGSC_CTL(i0) (0x0000003c + 0x1*(i0))
+
+#define OCMEM_PSGSC_CTL_MACRO0_MODE(val) FIELD_PREP(0x00000007, (val))
+#define OCMEM_PSGSC_CTL_MACRO1_MODE(val) FIELD_PREP(0x00000070, (val))
+#define OCMEM_PSGSC_CTL_MACRO2_MODE(val) FIELD_PREP(0x00000700, (val))
+#define OCMEM_PSGSC_CTL_MACRO3_MODE(val) FIELD_PREP(0x00007000, (val))
+
+#define OCMEM_CLK_CORE_IDX 0
+static struct clk_bulk_data ocmem_clks[] = {
+ {
+ .id = "core",
+ },
+ {
+ .id = "iface",
+ },
+};
+
+static inline void ocmem_write(struct ocmem *ocmem, u32 reg, u32 data)
+{
+ writel(data, ocmem->mmio + reg);
+}
+
+static inline u32 ocmem_read(struct ocmem *ocmem, u32 reg)
+{
+ return readl(ocmem->mmio + reg);
+}
+
+static void update_ocmem(struct ocmem *ocmem)
+{
+ uint32_t region_mode_ctrl = 0x0;
+ int i;
+
+ if (!qcom_scm_ocmem_lock_available()) {
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (region->mode == THIN_MODE)
+ region_mode_ctrl |= BIT(i);
+ }
+
+ dev_dbg(ocmem->dev, "ocmem_region_mode_control %x\n",
+ region_mode_ctrl);
+ ocmem_write(ocmem, OCMEM_REG_REGION_MODE_CTL, region_mode_ctrl);
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+ u32 data;
+
+ data = OCMEM_PSGSC_CTL_MACRO0_MODE(region->macro_state[0]) |
+ OCMEM_PSGSC_CTL_MACRO1_MODE(region->macro_state[1]) |
+ OCMEM_PSGSC_CTL_MACRO2_MODE(region->macro_state[2]) |
+ OCMEM_PSGSC_CTL_MACRO3_MODE(region->macro_state[3]);
+
+ ocmem_write(ocmem, OCMEM_REG_PSGSC_CTL(i), data);
+ }
+}
+
+static unsigned long phys_to_offset(struct ocmem *ocmem,
+ unsigned long addr)
+{
+ if (addr < ocmem->memory->start || addr >= ocmem->memory->end)
+ return 0;
+
+ return addr - ocmem->memory->start;
+}
+
+static unsigned long device_address(struct ocmem *ocmem,
+ enum ocmem_client client,
+ unsigned long addr)
+{
+ WARN_ON(client != OCMEM_GRAPHICS);
+
+ /* TODO: gpu uses phys_to_offset, but others do not.. */
+ return phys_to_offset(ocmem, addr);
+}
+
+static void update_range(struct ocmem *ocmem, struct ocmem_buf *buf,
+ enum ocmem_macro_state mstate, enum region_mode rmode)
+{
+ unsigned long offset = 0;
+ int i, j;
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (buf->offset <= offset && offset < buf->offset + buf->len)
+ region->mode = rmode;
+
+ for (j = 0; j < region->num_macros; j++) {
+ if (buf->offset <= offset &&
+ offset < buf->offset + buf->len)
+ region->macro_state[j] = mstate;
+
+ offset += region->macro_size;
+ }
+ }
+
+ update_ocmem(ocmem);
+}
+
+struct ocmem *of_get_ocmem(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *devnode;
+ struct ocmem *ocmem;
+
+ devnode = of_parse_phandle(dev->of_node, "sram", 0);
+ if (!devnode || !devnode->parent) {
+ dev_err(dev, "Cannot look up sram phandle\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(devnode->parent);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", devnode->name);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ ocmem = platform_get_drvdata(pdev);
+ if (!ocmem) {
+ dev_err(dev, "Cannot get ocmem\n");
+ return ERR_PTR(-ENODEV);
+ }
+ return ocmem;
+}
+EXPORT_SYMBOL(of_get_ocmem);
+
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+ unsigned long size)
+{
+ struct ocmem_buf *buf;
+ int ret;
+
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return ERR_PTR(-ENODEV);
+
+ if (size < OCMEM_MIN_ALLOC || !IS_ALIGNED(size, OCMEM_MIN_ALIGN))
+ return ERR_PTR(-EINVAL);
+
+ if (test_and_set_bit_lock(BIT(client), &ocmem->active_allocations))
+ return ERR_PTR(-EBUSY);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ buf->offset = 0;
+ buf->addr = device_address(ocmem, client, buf->offset);
+ buf->len = size;
+
+ update_range(ocmem, buf, CORE_ON, WIDE_MODE);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len, WIDE_MODE);
+ if (ret) {
+ dev_err(ocmem->dev, "could not lock: %d\n", ret);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, buf->offset);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END,
+ buf->offset + buf->len);
+ }
+
+ dev_dbg(ocmem->dev, "using %ldK of OCMEM at 0x%08lx for client %d\n",
+ size / 1024, buf->addr, client);
+
+ return buf;
+
+err_kfree:
+ kfree(buf);
+err_unlock:
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(ocmem_allocate);
+
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf)
+{
+ /* TODO: add support for other clients... */
+ if (WARN_ON(client != OCMEM_GRAPHICS))
+ return;
+
+ update_range(ocmem, buf, CLK_OFF, MODE_DEFAULT);
+
+ if (qcom_scm_ocmem_lock_available()) {
+ int ret;
+
+ ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID,
+ buf->offset, buf->len);
+ if (ret)
+ dev_err(ocmem->dev, "could not unlock: %d\n", ret);
+ } else {
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_START, 0x0);
+ ocmem_write(ocmem, OCMEM_REG_GFX_MPU_END, 0x0);
+ }
+
+ kfree(buf);
+
+ clear_bit_unlock(BIT(client), &ocmem->active_allocations);
+}
+EXPORT_SYMBOL(ocmem_free);
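+
+/*
+ * Typical client flow (sketch based on the API above; error handling
+ * elided, and the 1 MiB size is only an example):
+ *
+ *   struct ocmem *ocmem = of_get_ocmem(dev);
+ *   struct ocmem_buf *buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);
+ *
+ *   ... program the hardware with buf->addr and buf->len ...
+ *
+ *   ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
+ */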
+
+static int ocmem_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long reg, region_size;
+ int i, j, ret, num_banks;
+ struct resource *res;
+ struct ocmem *ocmem;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ ocmem = devm_kzalloc(dev, sizeof(*ocmem), GFP_KERNEL);
+ if (!ocmem)
+ return -ENOMEM;
+
+ ocmem->dev = dev;
+ ocmem->config = device_get_match_data(dev);
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Unable to get clocks\n");
+
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ ocmem->mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ocmem->mmio)) {
+ dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
+ return PTR_ERR(ocmem->mmio);
+ }
+
+ ocmem->memory = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mem");
+ if (!ocmem->memory) {
+ dev_err(dev, "Could not get mem region\n");
+ return -ENXIO;
+ }
+
+ /* The core clock is synchronous with graphics */
+ WARN_ON(clk_set_rate(ocmem_clks[OCMEM_CLK_CORE_IDX].clk, 1000) < 0);
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ if (ret) {
+ dev_info(ocmem->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ if (qcom_scm_restore_sec_cfg_available()) {
+ dev_dbg(dev, "configuring scm\n");
+ ret = qcom_scm_restore_sec_cfg(QCOM_SCM_OCMEM_DEV_ID, 0);
+ if (ret) {
+ dev_err(dev, "Could not enable secure configuration\n");
+ goto err_clk_disable;
+ }
+ }
+
+ reg = ocmem_read(ocmem, OCMEM_REG_HW_PROFILE);
+ ocmem->num_ports = OCMEM_HW_PROFILE_NUM_PORTS(reg);
+ ocmem->num_macros = OCMEM_HW_PROFILE_NUM_MACROS(reg);
+ ocmem->interleaved = !!(reg & OCMEM_HW_PROFILE_INTERLEAVING);
+
+ num_banks = ocmem->num_ports / 2;
+ region_size = ocmem->config->macro_size * num_banks;
+
+ dev_info(dev, "%u ports, %u regions, %u macros, %sinterleaved\n",
+ ocmem->num_ports, ocmem->config->num_regions,
+ ocmem->num_macros, ocmem->interleaved ? "" : "not ");
+
+ ocmem->regions = devm_kcalloc(dev, ocmem->config->num_regions,
+ sizeof(struct ocmem_region), GFP_KERNEL);
+ if (!ocmem->regions) {
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ for (i = 0; i < ocmem->config->num_regions; i++) {
+ struct ocmem_region *region = &ocmem->regions[i];
+
+ if (WARN_ON(num_banks > ARRAY_SIZE(region->macro_state))) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ region->mode = MODE_DEFAULT;
+ region->num_macros = num_banks;
+
+ if (i == (ocmem->config->num_regions - 1) &&
+ reg & OCMEM_HW_PROFILE_LAST_REGN_HALFSIZE) {
+ region->macro_size = ocmem->config->macro_size / 2;
+ region->region_size = region_size / 2;
+ } else {
+ region->macro_size = ocmem->config->macro_size;
+ region->region_size = region_size;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(region->macro_state); j++)
+ region->macro_state[j] = CLK_OFF;
+ }
+
+ platform_set_drvdata(pdev, ocmem);
+
+ return 0;
+
+err_clk_disable:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+ return ret;
+}
+
+static int ocmem_dev_remove(struct platform_device *pdev)
+{
+ clk_bulk_disable_unprepare(ARRAY_SIZE(ocmem_clks), ocmem_clks);
+
+ return 0;
+}
+
+static const struct ocmem_config ocmem_8974_config = {
+ .num_regions = 3,
+ .macro_size = SZ_128K,
+};
+
+static const struct of_device_id ocmem_of_match[] = {
+ { .compatible = "qcom,msm8974-ocmem", .data = &ocmem_8974_config },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ocmem_of_match);
+
+static struct platform_driver ocmem_driver = {
+ .probe = ocmem_dev_probe,
+ .remove = ocmem_dev_remove,
+ .driver = {
+ .name = "ocmem",
+ .of_match_table = ocmem_of_match,
+ },
+};
+
+module_platform_driver(ocmem_driver);
+
+MODULE_DESCRIPTION("On Chip Memory (OCMEM) allocator for some Snapdragon SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
new file mode 100644
index 0000000..205cc96
--- /dev/null
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "pdr_internal.h"
+
+struct pdr_service {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ char service_path[SERVREG_NAME_LENGTH + 1];
+
+ struct sockaddr_qrtr addr;
+
+ unsigned int instance;
+ unsigned int service;
+ u8 service_data_valid;
+ u32 service_data;
+ int state;
+
+ bool need_notifier_register;
+ bool need_notifier_remove;
+ bool need_locator_lookup;
+ bool service_connected;
+
+ struct list_head node;
+};
+
+struct pdr_handle {
+ struct qmi_handle locator_hdl;
+ struct qmi_handle notifier_hdl;
+
+ struct sockaddr_qrtr locator_addr;
+
+ struct list_head lookups;
+ struct list_head indack_list;
+
+ /* control access to pdr lookup/indack lists */
+ struct mutex list_lock;
+
+ /* serialize pd status invocation */
+ struct mutex status_lock;
+
+ /* control access to the locator state */
+ struct mutex lock;
+
+ bool locator_init_complete;
+
+ struct work_struct locator_work;
+ struct work_struct notifier_work;
+ struct work_struct indack_work;
+
+ struct workqueue_struct *notifier_wq;
+ struct workqueue_struct *indack_wq;
+
+ void (*status)(int state, char *service_path, void *priv);
+ void *priv;
+};
+
+struct pdr_list_node {
+ enum servreg_service_state curr_state;
+ u16 transaction_id;
+ struct pdr_service *pds;
+ struct list_head node;
+};
+
+static int pdr_locator_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+ struct pdr_service *pds;
+
+ /* Create a local client port for QMI communication */
+ pdr->locator_addr.sq_family = AF_QIPCRTR;
+ pdr->locator_addr.sq_node = svc->node;
+ pdr->locator_addr.sq_port = svc->port;
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = true;
+ mutex_unlock(&pdr->lock);
+
+ /* Service pending lookup requests */
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->need_locator_lookup)
+ schedule_work(&pdr->locator_work);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_locator_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ locator_hdl);
+
+ mutex_lock(&pdr->lock);
+ pdr->locator_init_complete = false;
+ mutex_unlock(&pdr->lock);
+
+ pdr->locator_addr.sq_node = 0;
+ pdr->locator_addr.sq_port = 0;
+}
+
+static struct qmi_ops pdr_locator_ops = {
+ .new_server = pdr_locator_new_server,
+ .del_server = pdr_locator_del_server,
+};
+
+static int pdr_register_listener(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ bool enable)
+{
+ struct servreg_register_listener_resp resp;
+ struct servreg_register_listener_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_register_listener_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.enable = enable;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_REGISTER_LISTENER_REQ,
+ SERVREG_REGISTER_LISTENER_REQ_LEN,
+ servreg_register_listener_req_ei,
+ &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s register listener txn wait failed: %d\n",
+ pds->service_path, ret);
+ return ret;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s register listener failed: 0x%x\n",
+ pds->service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ pds->state = resp.curr_state;
+
+ return 0;
+}
+
+static void pdr_notifier_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ notifier_work);
+ struct pdr_service *pds;
+ int ret;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service_connected) {
+ if (!pds->need_notifier_register)
+ continue;
+
+ pds->need_notifier_register = false;
+ ret = pdr_register_listener(pdr, pds, true);
+ if (ret < 0)
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ } else {
+ if (!pds->need_notifier_remove)
+ continue;
+
+ pds->need_notifier_remove = false;
+ pds->state = SERVREG_SERVICE_STATE_DOWN;
+ }
+
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static int pdr_notifier_new_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = true;
+ pds->need_notifier_register = true;
+ pds->addr.sq_family = AF_QIPCRTR;
+ pds->addr.sq_node = svc->node;
+ pds->addr.sq_port = svc->port;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ return 0;
+}
+
+static void pdr_notifier_del_server(struct qmi_handle *qmi,
+ struct qmi_service *svc)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ struct pdr_service *pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (pds->service == svc->service &&
+ pds->instance == svc->instance) {
+ pds->service_connected = false;
+ pds->need_notifier_remove = true;
+ pds->addr.sq_node = 0;
+ pds->addr.sq_port = 0;
+ queue_work(pdr->notifier_wq, &pdr->notifier_work);
+ }
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+static struct qmi_ops pdr_notifier_ops = {
+ .new_server = pdr_notifier_new_server,
+ .del_server = pdr_notifier_del_server,
+};
+
+static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
+ u16 tid)
+{
+ struct servreg_set_ack_resp resp;
+ struct servreg_set_ack_req req;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ req.transaction_id = tid;
+ strcpy(req.service_path, pds->service_path);
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
+ &txn, SERVREG_SET_ACK_REQ,
+ SERVREG_SET_ACK_REQ_LEN,
+ servreg_set_ack_req_ei,
+ &req);
+
+ /* Skip waiting for response */
+ qmi_txn_cancel(&txn);
+ return ret;
+}
+
+static void pdr_indack_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ indack_work);
+ struct pdr_list_node *ind, *tmp;
+ struct pdr_service *pds;
+
+ list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
+ pds = ind->pds;
+
+ mutex_lock(&pdr->status_lock);
+ pds->state = ind->curr_state;
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+
+ /* Ack the indication after clients release the PD resources */
+ pdr_send_indack_msg(pdr, pds, ind->transaction_id);
+
+ mutex_lock(&pdr->list_lock);
+ list_del(&ind->node);
+ mutex_unlock(&pdr->list_lock);
+
+ kfree(ind);
+ }
+}
+
+static void pdr_indication_cb(struct qmi_handle *qmi,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
+ notifier_hdl);
+ const struct servreg_state_updated_ind *ind_msg = data;
+ struct pdr_list_node *ind;
+ struct pdr_service *pds;
+ bool found = false;
+
+ if (!ind_msg || !ind_msg->service_path[0] ||
+ strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(pds, &pdr->lookups, node) {
+ if (strcmp(pds->service_path, ind_msg->service_path))
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!found)
+ return;
+
+ pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
+ ind_msg->service_path, ind_msg->curr_state,
+ ind_msg->transaction_id);
+
+ ind = kzalloc(sizeof(*ind), GFP_KERNEL);
+ if (!ind)
+ return;
+
+ ind->transaction_id = ind_msg->transaction_id;
+ ind->curr_state = ind_msg->curr_state;
+ ind->pds = pds;
+
+ mutex_lock(&pdr->list_lock);
+ list_add_tail(&ind->node, &pdr->indack_list);
+ mutex_unlock(&pdr->list_lock);
+
+ queue_work(pdr->indack_wq, &pdr->indack_work);
+}
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = SERVREG_STATE_UPDATED_IND_ID,
+ .ei = servreg_state_updated_ind_ei,
+ .decoded_size = sizeof(struct servreg_state_updated_ind),
+ .fn = pdr_indication_cb,
+ },
+ {}
+};
+
+static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
+ struct servreg_get_domain_list_resp *resp,
+ struct pdr_handle *pdr)
+{
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&pdr->locator_hdl, &txn,
+ servreg_get_domain_list_resp_ei, resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->locator_hdl,
+ &pdr->locator_addr,
+ &txn, SERVREG_GET_DOMAIN_LIST_REQ,
+ SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
+ servreg_get_domain_list_req_ei,
+ req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s get domain list txn wait failed: %d\n",
+ req->service_name, ret);
+ return ret;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s get domain list failed: 0x%x\n",
+ req->service_name, resp->resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_get_domain_list_resp *resp;
+ struct servreg_get_domain_list_req req;
+ struct servreg_location_entry *entry;
+ int domains_read = 0;
+ int ret, i;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /* Prepare req message */
+ strcpy(req.service_name, pds->service_name);
+ req.domain_offset_valid = true;
+ req.domain_offset = 0;
+
+ do {
+ req.domain_offset = domains_read;
+ ret = pdr_get_domain_list(&req, resp, pdr);
+ if (ret < 0)
+ goto out;
+
+ for (i = domains_read; i < resp->domain_list_len; i++) {
+ entry = &resp->domain_list[i];
+
+ if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
+ continue;
+
+ if (!strcmp(entry->name, pds->service_path)) {
+ pds->service_data_valid = entry->service_data_valid;
+ pds->service_data = entry->service_data;
+ pds->instance = entry->instance;
+ goto out;
+ }
+ }
+
+ /* Update ret to indicate that the service is not yet found */
+ ret = -ENXIO;
+
+ /* Always read total_domains from the response msg */
+ if (resp->domain_list_len > resp->total_domains)
+ resp->domain_list_len = resp->total_domains;
+
+ domains_read += resp->domain_list_len;
+ } while (domains_read < resp->total_domains);
+out:
+ kfree(resp);
+ return ret;
+}
+
+static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
+ struct pdr_service *pds,
+ int err)
+{
+ pr_err("PDR: service lookup for %s failed: %d\n",
+ pds->service_name, err);
+
+ if (err == -ENXIO)
+ return;
+
+ list_del(&pds->node);
+ pds->state = SERVREG_LOCATOR_ERR;
+ mutex_lock(&pdr->status_lock);
+ pdr->status(pds->state, pds->service_path, pdr->priv);
+ mutex_unlock(&pdr->status_lock);
+ kfree(pds);
+}
+
+static void pdr_locator_work(struct work_struct *work)
+{
+ struct pdr_handle *pdr = container_of(work, struct pdr_handle,
+ locator_work);
+ struct pdr_service *pds, *tmp;
+ int ret = 0;
+
+ /* Bail out early if the SERVREG LOCATOR QMI service is not up */
+ mutex_lock(&pdr->lock);
+ if (!pdr->locator_init_complete) {
+ mutex_unlock(&pdr->lock);
+ pr_debug("PDR: SERVICE LOCATOR service not available\n");
+ return;
+ }
+ mutex_unlock(&pdr->lock);
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ if (!pds->need_locator_lookup)
+ continue;
+
+ ret = pdr_locate_service(pdr, pds);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
+ pds->instance);
+ if (ret < 0) {
+ pdr_notify_lookup_failure(pdr, pds, ret);
+ continue;
+ }
+
+ pds->need_locator_lookup = false;
+ }
+ mutex_unlock(&pdr->list_lock);
+}
+
+/**
+ * pdr_add_lookup() - register a tracking request for a PD
+ * @pdr: PDR client handle
+ * @service_name: service name of the tracking request
+ * @service_path: service path of the tracking request
+ *
+ * Registering a pdr lookup allows for tracking the life cycle of the PD.
+ *
+ * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
+ * returned if a lookup is already in progress for the given service path.
+ */
+struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
+ const char *service_name,
+ const char *service_path)
+{
+ struct pdr_service *pds, *tmp;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
+ !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
+ return ERR_PTR(-EINVAL);
+
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
+ if (!pds)
+ return ERR_PTR(-ENOMEM);
+
+ pds->service = SERVREG_NOTIFIER_SERVICE;
+ strcpy(pds->service_name, service_name);
+ strcpy(pds->service_path, service_path);
+ pds->need_locator_lookup = true;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (strcmp(tmp->service_path, service_path))
+ continue;
+
+ mutex_unlock(&pdr->list_lock);
+ ret = -EALREADY;
+ goto err;
+ }
+
+ list_add(&pds->node, &pdr->lookups);
+ mutex_unlock(&pdr->list_lock);
+
+ schedule_work(&pdr->locator_work);
+
+ return pds;
+err:
+ kfree(pds);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_add_lookup);
+
+/**
+ * pdr_restart_pd() - restart PD
+ * @pdr: PDR client handle
+ * @pds: PD service handle
+ *
+ * Restarts the PD tracked by the PDR client handle for a given service path.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
+{
+ struct servreg_restart_pd_resp resp;
+ struct servreg_restart_pd_req req = { 0 };
+ struct sockaddr_qrtr addr;
+ struct pdr_service *tmp;
+ struct qmi_txn txn;
+ int ret;
+
+ if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
+ return -EINVAL;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry(tmp, &pdr->lookups, node) {
+ if (tmp != pds)
+ continue;
+
+ if (!pds->service_connected)
+ break;
+
+ /* Prepare req message */
+ strcpy(req.service_path, pds->service_path);
+ addr = pds->addr;
+ break;
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ if (!req.service_path[0])
+ return -EINVAL;
+
+ ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
+ servreg_restart_pd_resp_ei,
+ &resp);
+ if (ret < 0)
+ return ret;
+
+ ret = qmi_send_request(&pdr->notifier_hdl, &addr,
+ &txn, SERVREG_RESTART_PD_REQ,
+ SERVREG_RESTART_PD_REQ_MAX_LEN,
+ servreg_restart_pd_req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ return ret;
+ }
+
+ ret = qmi_txn_wait(&txn, 5 * HZ);
+ if (ret < 0) {
+ pr_err("PDR: %s PD restart txn wait failed: %d\n",
+ req.service_path, ret);
+ return ret;
+ }
+
+ /* Check response if PDR is disabled */
+ if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
+ resp.resp.error == QMI_ERR_DISABLED_V01) {
+ pr_err("PDR: %s PD restart is disabled: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EOPNOTSUPP;
+ }
+
+ /* Check the response for other error cases */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("PDR: %s request for PD restart failed: 0x%x\n",
+ req.service_path, resp.resp.error);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pdr_restart_pd);
+
+/**
+ * pdr_handle_alloc() - initialize the PDR client handle
+ * @status: function to be called on PD state change
+ * @priv: handle for client's use
+ *
+ * Initializes the PDR client handle to allow for tracking/restart of PDs.
+ *
+ * Return: pdr_handle object on success, ERR_PTR on failure.
+ */
+struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
+ char *service_path,
+ void *priv), void *priv)
+{
+ struct pdr_handle *pdr;
+ int ret;
+
+ if (!status)
+ return ERR_PTR(-EINVAL);
+
+ pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
+ if (!pdr)
+ return ERR_PTR(-ENOMEM);
+
+ pdr->status = status;
+ pdr->priv = priv;
+
+ mutex_init(&pdr->status_lock);
+ mutex_init(&pdr->list_lock);
+ mutex_init(&pdr->lock);
+
+ INIT_LIST_HEAD(&pdr->lookups);
+ INIT_LIST_HEAD(&pdr->indack_list);
+
+ INIT_WORK(&pdr->locator_work, pdr_locator_work);
+ INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
+ INIT_WORK(&pdr->indack_work, pdr_indack_work);
+
+ pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
+ if (!pdr->notifier_wq) {
+ ret = -ENOMEM;
+ goto free_pdr_handle;
+ }
+
+ pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
+ if (!pdr->indack_wq) {
+ ret = -ENOMEM;
+ goto destroy_notifier;
+ }
+
+ ret = qmi_handle_init(&pdr->locator_hdl,
+ SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
+ &pdr_locator_ops, NULL);
+ if (ret < 0)
+ goto destroy_indack;
+
+ ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ ret = qmi_handle_init(&pdr->notifier_hdl,
+ SERVREG_STATE_UPDATED_IND_MAX_LEN,
+ &pdr_notifier_ops,
+ qmi_indication_handler);
+ if (ret < 0)
+ goto release_qmi_handle;
+
+ return pdr;
+
+release_qmi_handle:
+ qmi_handle_release(&pdr->locator_hdl);
+destroy_indack:
+ destroy_workqueue(pdr->indack_wq);
+destroy_notifier:
+ destroy_workqueue(pdr->notifier_wq);
+free_pdr_handle:
+ kfree(pdr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pdr_handle_alloc);
+
+/**
+ * pdr_handle_release() - release the PDR client handle
+ * @pdr: PDR client handle
+ *
+ * Cleans up pending tracking requests and releases the underlying qmi handles.
+ */
+void pdr_handle_release(struct pdr_handle *pdr)
+{
+ struct pdr_service *pds, *tmp;
+
+ if (IS_ERR_OR_NULL(pdr))
+ return;
+
+ mutex_lock(&pdr->list_lock);
+ list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
+ list_del(&pds->node);
+ kfree(pds);
+ }
+ mutex_unlock(&pdr->list_lock);
+
+ cancel_work_sync(&pdr->locator_work);
+ cancel_work_sync(&pdr->notifier_work);
+ cancel_work_sync(&pdr->indack_work);
+
+ destroy_workqueue(pdr->notifier_wq);
+ destroy_workqueue(pdr->indack_wq);
+
+ qmi_handle_release(&pdr->locator_hdl);
+ qmi_handle_release(&pdr->notifier_hdl);
+
+ kfree(pdr);
+}
+EXPORT_SYMBOL(pdr_handle_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
new file mode 100644
index 0000000..ab9ae8c
--- /dev/null
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PDR_HELPER_INTERNAL__
+#define __QCOM_PDR_HELPER_INTERNAL__
+
+#include <linux/soc/qcom/pdr.h>
+
+#define SERVREG_LOCATOR_SERVICE 0x40
+#define SERVREG_NOTIFIER_SERVICE 0x42
+
+#define SERVREG_REGISTER_LISTENER_REQ 0x20
+#define SERVREG_GET_DOMAIN_LIST_REQ 0x21
+#define SERVREG_STATE_UPDATED_IND_ID 0x22
+#define SERVREG_SET_ACK_REQ 0x23
+#define SERVREG_RESTART_PD_REQ 0x24
+
+#define SERVREG_DOMAIN_LIST_LENGTH 32
+#define SERVREG_RESTART_PD_REQ_MAX_LEN 67
+#define SERVREG_REGISTER_LISTENER_REQ_LEN 71
+#define SERVREG_SET_ACK_REQ_LEN 72
+#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN 74
+#define SERVREG_STATE_UPDATED_IND_MAX_LEN 79
+#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN 2389
+
+struct servreg_location_entry {
+ char name[SERVREG_NAME_LENGTH + 1];
+ u8 service_data_valid;
+ u32 service_data;
+ u32 instance;
+};
+
+struct qmi_elem_info servreg_location_entry_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ name),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ instance),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct servreg_location_entry,
+ service_data),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_req {
+ char service_name[SERVREG_NAME_LENGTH + 1];
+ u8 domain_offset_valid;
+ u32 domain_offset;
+};
+
+struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ service_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_req,
+ domain_offset),
+ },
+ {}
+};
+
+struct servreg_get_domain_list_resp {
+ struct qmi_response_type_v01 resp;
+ u8 total_domains_valid;
+ u16 total_domains;
+ u8 db_rev_count_valid;
+ u16 db_rev_count;
+ u8 domain_list_valid;
+ u32 domain_list_len;
+ struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
+};
+
+struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ total_domains),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ db_rev_count),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = SERVREG_DOMAIN_LIST_LENGTH,
+ .elem_size = sizeof(struct servreg_location_entry),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct servreg_get_domain_list_resp,
+ domain_list),
+ .ei_array = servreg_location_entry_ei,
+ },
+ {}
+};
+
+struct servreg_register_listener_req {
+ u8 enable;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_register_listener_req_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_register_listener_req,
+ enable),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_register_listener_resp {
+ struct qmi_response_type_v01 resp;
+ u8 curr_state_valid;
+ enum servreg_service_state curr_state;
+};
+
+struct qmi_elem_info servreg_register_listener_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum servreg_service_state),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct servreg_register_listener_resp,
+ curr_state),
+ },
+ {}
+};
+
+struct servreg_restart_pd_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+};
+
+struct qmi_elem_info servreg_restart_pd_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_restart_pd_req,
+ service_path),
+ },
+ {}
+};
+
+struct servreg_restart_pd_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_restart_pd_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+struct servreg_state_updated_ind {
+ enum servreg_service_state curr_state;
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_state_updated_ind_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ curr_state),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct servreg_state_updated_ind,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_req {
+ char service_path[SERVREG_NAME_LENGTH + 1];
+ u16 transaction_id;
+};
+
+struct qmi_elem_info servreg_set_ack_req_ei[] = {
+ {
+ .data_type = QMI_STRING,
+ .elem_len = SERVREG_NAME_LENGTH + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct servreg_set_ack_req,
+ service_path),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_req,
+ transaction_id),
+ },
+ {}
+};
+
+struct servreg_set_ack_resp {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_elem_info servreg_set_ack_resp_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct servreg_set_ack_resp,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+#endif
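
Each qmi_elem_info table above describes the QMI TLV wire format of one message: the .tlv_type byte, the element size, and the offset of the decoded field. Optional TLVs always come in pairs, a QMI_OPT_FLAG entry that records presence and a payload entry with the same .tlv_type, as in this minimal hypothetical message (for illustration only):

#include <linux/soc/qcom/qmi.h>

/* Hypothetical one-field message with an optional u32. */
struct example_req {
    u8 value_valid;     /* set by the decoder if TLV 0x10 is present */
    u32 value;
};

static struct qmi_elem_info example_req_ei[] = {
    {
        .data_type  = QMI_OPT_FLAG,
        .elem_len   = 1,
        .elem_size  = sizeof(u8),
        .array_type = NO_ARRAY,
        .tlv_type   = 0x10,
        .offset     = offsetof(struct example_req, value_valid),
    },
    {
        .data_type  = QMI_UNSIGNED_4_BYTE,
        .elem_len   = 1,
        .elem_size  = sizeof(u32),
        .array_type = NO_ARRAY,
        .tlv_type   = 0x10,
        .offset     = offsetof(struct example_req, value),
    },
    {}
};
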
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 2e66098..0dbca67 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -92,6 +92,9 @@
struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
};
+static const char * const icc_path_names[] = {"qup-core", "qup-config",
+ "qup-memory"};
+
#define QUP_HW_VER_REG 0x4
/* Common SE registers */
@@ -733,6 +736,97 @@
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
+int geni_icc_get(struct geni_se *se, const char *icc_ddr)
+{
+ int i, err;
+ const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
+
+ if (has_acpi_companion(se->dev))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ if (!icc_names[i])
+ continue;
+
+ se->icc_paths[i].path = devm_of_icc_get(se->dev, icc_names[i]);
+ if (IS_ERR(se->icc_paths[i].path))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ err = PTR_ERR(se->icc_paths[i].path);
+ if (err != -EPROBE_DEFER)
+ dev_err_ratelimited(se->dev, "Failed to get ICC path '%s': %d\n",
+ icc_names[i], err);
+ return err;
+
+}
+EXPORT_SYMBOL(geni_icc_get);
+
+int geni_icc_set_bw(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_set_bw(se->icc_paths[i].path,
+ se->icc_paths[i].avg_bw, se->icc_paths[i].avg_bw);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC BW voting failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_set_bw);
+
+void geni_icc_set_tag(struct geni_se *se, u32 tag)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++)
+ icc_set_tag(se->icc_paths[i].path, tag);
+}
+EXPORT_SYMBOL(geni_icc_set_tag);
+
+/* To do: Replace this with icc_bulk_enable once it's implemented in ICC core */
+int geni_icc_enable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_enable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC enable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_enable);
+
+int geni_icc_disable(struct geni_se *se)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
+ ret = icc_disable(se->icc_paths[i].path);
+ if (ret) {
+ dev_err_ratelimited(se->dev, "ICC disable failed on path '%s': %d\n",
+ icc_path_names[i], ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(geni_icc_disable);
+
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
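
A sketch of how a serial-engine driver might combine these helpers at probe time; the path indices GENI_TO_CORE/CPU_TO_GENI and the GENI_DEFAULT_BW value are assumed from <linux/qcom-geni-se.h>, and passing a NULL icc_ddr simply skips the DDR path, as geni_icc_get() allows:

static int example_se_icc_setup(struct geni_se *se)
{
    int ret;

    ret = geni_icc_get(se, NULL);   /* this engine votes no DDR path */
    if (ret)
        return ret;

    se->icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
    se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

    ret = geni_icc_set_bw(se);      /* vote before touching the SE */
    if (ret)
        return ret;

    return geni_icc_enable(se);
}
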
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 45c5aa7..4fe88d4 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -44,7 +44,7 @@
#define QMP_NUM_COOLING_RESOURCES 2
-static bool qmp_cdev_init_state = 1;
+static bool qmp_cdev_max_state = 1;
struct qmp_cooling_device {
struct thermal_cooling_device *cdev;
@@ -200,7 +200,7 @@
{
struct qmp *qmp = data;
- wake_up_interruptible_all(&qmp->event);
+ wake_up_all(&qmp->event);
return IRQ_HANDLED;
}
@@ -225,6 +225,7 @@
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
+ size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -239,6 +240,9 @@
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
data, len / sizeof(u32));
writel(len, qmp->msgram + qmp->offset);
+
+ /* Read back len to confirm data written in message RAM */
+ tlen = readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
@@ -402,7 +406,7 @@
static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- *state = qmp_cdev_init_state;
+ *state = qmp_cdev_max_state;
return 0;
}
@@ -432,7 +436,7 @@
snprintf(buf, sizeof(buf),
"{class: volt_flr, event:zero_temp, res:%s, value:%s}",
qmp_cdev->name,
- cdev_state ? "off" : "on");
+ cdev_state ? "on" : "off");
ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));
@@ -455,7 +459,7 @@
char *cdev_name = (char *)node->name;
qmp_cdev->qmp = qmp;
- qmp_cdev->state = qmp_cdev_init_state;
+ qmp_cdev->state = !qmp_cdev_max_state;
qmp_cdev->name = cdev_name;
qmp_cdev->cdev = devm_thermal_of_cooling_device_register
(qmp->dev, node,
@@ -599,6 +603,7 @@
{ .compatible = "qcom,sc7180-aoss-qmp", },
{ .compatible = "qcom,sdm845-aoss-qmp", },
{ .compatible = "qcom,sm8150-aoss-qmp", },
+ { .compatible = "qcom,sm8250-aoss-qmp", },
{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
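
The cooling-device change above also fixes the message polarity: a cooling state of 1 now requests the zero-temperature voltage floor to be on rather than off. Purely for illustration, with a hypothetical "cx" resource the fixed path builds:

    char buf[QMP_MSG_LEN];  /* buffer length as defined in this driver */

    snprintf(buf, sizeof(buf),
             "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
             "cx", "on");   /* cdev_state == 1 after the fix */
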
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index f9e309f..1a03eaa 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -655,8 +655,12 @@
qmi->sock = qmi_sock_create(qmi, &qmi->sq);
if (IS_ERR(qmi->sock)) {
- pr_err("failed to create QMI socket\n");
- ret = PTR_ERR(qmi->sock);
+ if (PTR_ERR(qmi->sock) == -EAFNOSUPPORT) {
+ ret = -EPROBE_DEFER;
+ } else {
+ pr_err("failed to create QMI socket\n");
+ ret = PTR_ERR(qmi->sock);
+ }
goto err_destroy_wq;
}
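
With this change a client probing before the QRTR socket family is registered sees -EPROBE_DEFER from qmi_handle_init() and can simply propagate it; a minimal, hypothetical probe (ops left empty and handlers NULL for brevity, the receive buffer size is arbitrary):

static const struct qmi_ops example_qmi_ops; /* no server events needed */

static int example_qmi_probe(struct platform_device *pdev)
{
    struct qmi_handle *qmi;
    int ret;

    qmi = devm_kzalloc(&pdev->dev, sizeof(*qmi), GFP_KERNEL);
    if (!qmi)
        return -ENOMEM;

    ret = qmi_handle_init(qmi, 256, &example_qmi_ops, NULL);
    if (ret < 0)
        return ret;     /* now -EPROBE_DEFER until AF_QIPCRTR is up */

    platform_set_drvdata(pdev, qmi);
    return 0;
}
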
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index a7bbbb6..344ba68 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -8,6 +8,7 @@
#define __RPM_INTERNAL_H__
#include <linux/bitmap.h>
+#include <linux/wait.h>
#include <soc/qcom/tcs.h>
#define TCS_TYPE_NR 4
@@ -22,16 +23,23 @@
* struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
* to the controller
*
- * @drv: the controller
- * @type: type of the TCS in this group - active, sleep, wake
- * @mask: mask of the TCSes relative to all the TCSes in the RSC
- * @offset: start of the TCS group relative to the TCSes in the RSC
- * @num_tcs: number of TCSes in this type
- * @ncpt: number of commands in each TCS
- * @lock: lock for synchronizing this TCS writes
- * @req: requests that are sent from the TCS
- * @cmd_cache: flattened cache of cmds in sleep/wake TCS
- * @slots: indicates which of @cmd_addr are occupied
+ * @drv: The controller.
+ * @type: Type of the TCS in this group - active, sleep, wake.
+ * @mask: Mask of the TCSes relative to all the TCSes in the RSC.
+ * @offset: Start of the TCS group relative to the TCSes in the RSC.
+ * @num_tcs: Number of TCSes in this type.
+ * @ncpt: Number of commands in each TCS.
+ * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
+ * transfers (could be on a wake/sleep TCS if we are borrowing for
+ * an ACTIVE_ONLY transfer).
+ * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
+ * trigger
+ * End: get irq, access req,
+ * grab drv->lock, clear tcs_in_use, drop drv->lock
+ * @slots: Indicates which of @cmd_addr are occupied; only used for
+ * SLEEP / WAKE TCSs. Things are tightly packed in the
+ * case that (ncpt < MAX_CMDS_PER_TCS). That is, if ncpt = 2 and
+ * MAX_CMDS_PER_TCS = 16 then bit[2] is the first bit in the 2nd TCS.
*/
struct tcs_group {
struct rsc_drv *drv;
@@ -40,9 +48,7 @@
u32 offset;
int num_tcs;
int ncpt;
- spinlock_t lock;
const struct tcs_request *req[MAX_TCS_PER_TYPE];
- u32 *cmd_cache;
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
};
@@ -84,31 +90,47 @@
* struct rsc_drv: the Direct Resource Voter (DRV) of the
* Resource State Coordinator controller (RSC)
*
- * @name: controller identifier
- * @tcs_base: start address of the TCS registers in this controller
- * @id: instance id in the controller (Direct Resource Voter)
- * @num_tcs: number of TCSes in this DRV
- * @tcs: TCS groups
- * @tcs_in_use: s/w state of the TCS
- * @lock: synchronize state of the controller
- * @client: handle to the DRV's client.
+ * @name: Controller identifier.
+ * @tcs_base: Start address of the TCS registers in this controller.
+ * @id: Instance id in the controller (Direct Resource Voter).
+ * @num_tcs: Number of TCSes in this DRV.
+ * @rsc_pm: CPU PM notifier for controller.
+ * Used when solver mode is not present.
+ * @cpus_in_pm: Number of CPUs not in idle power collapse.
+ * Used when solver mode is not present.
+ * @tcs: TCS groups.
+ * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
+ * transfers, but might show a sleep/wake TCS in use if
+ * it was borrowed for an active_only transfer. You
+ * must hold the lock in this struct (AKA drv->lock) in
+ * order to update this.
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
+ * @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
+ * slot.
+ * @client: Handle to the DRV's client.
*/
struct rsc_drv {
const char *name;
void __iomem *tcs_base;
int id;
int num_tcs;
+ struct notifier_block rsc_pm;
+ atomic_t cpus_in_pm;
struct tcs_group tcs[TCS_TYPE_NR];
DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
spinlock_t lock;
+ wait_queue_head_t tcs_wait;
struct rpmh_ctrlr client;
};
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
const struct tcs_request *msg);
-int rpmh_rsc_invalidate(struct rsc_drv *drv);
+void rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
+int rpmh_flush(struct rpmh_ctrlr *ctrlr);
#endif /* __RPM_INTERNAL_H__ */
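
The new @tcs_wait pairs with @lock through wait_event_lock_irq(), which drops the spinlock while sleeping and retakes it before re-checking the condition. The shape of the pattern, with a hypothetical try_claim() helper standing in for the claim_tcs_for_req() used in rpmh-rsc.c below:

    spin_lock_irqsave(&drv->lock, flags);
    wait_event_lock_irq(drv->tcs_wait,
                        (tcs_id = try_claim(drv)) >= 0,
                        drv->lock);
    /* tcs_id is claimed and drv->lock is still held here */
    spin_unlock_irqrestore(&drv->lock, flags);

    /* release path: free the TCS under the lock, then wake waiters */
    spin_lock(&drv->lock);
    clear_bit(tcs_id, drv->tcs_in_use);
    spin_unlock(&drv->lock);
    wake_up(&drv->tcs_wait);
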
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 8924fcd..a297911 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -6,9 +6,11 @@
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
@@ -17,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
@@ -30,21 +33,41 @@
#define RSC_DRV_TCS_OFFSET 672
#define RSC_DRV_CMD_OFFSET 20
-/* DRV Configuration Information Register */
+/* DRV HW Solver Configuration Information Register */
+#define DRV_SOLVER_CONFIG 0x04
+#define DRV_HW_SOLVER_MASK 1
+#define DRV_HW_SOLVER_SHIFT 24
+
+/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG 0x0C
#define DRV_NUM_TCS_MASK 0x3F
#define DRV_NUM_TCS_SHIFT 6
#define DRV_NCPT_MASK 0x1F
#define DRV_NCPT_SHIFT 27
-/* Register offsets */
+/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE 0x00
#define RSC_DRV_IRQ_STATUS 0x04
-#define RSC_DRV_IRQ_CLEAR 0x08
-#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */
+
+/*
+ * Offsets for per TCS Registers.
+ *
+ * TCSes start at 0x10 from tcs_base and are stored one after another.
+ * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
+ * of the below to find a register.
+ */
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */
#define RSC_DRV_CONTROL 0x14
-#define RSC_DRV_STATUS 0x18
-#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */
+#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */
+
+/*
+ * Offsets for per command in a TCS.
+ *
+ * Commands (up to 16) start at 0x30 in a TCS; multiply command index
+ * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
+ */
#define RSC_DRV_CMD_MSGID 0x30
#define RSC_DRV_CMD_ADDR 0x34
#define RSC_DRV_CMD_DATA 0x38
@@ -61,90 +84,183 @@
#define CMD_STATUS_ISSUED BIT(8)
#define CMD_STATUS_COMPL BIT(16)
-static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+/*
+ * Here's a high level overview of how all the registers in RPMH work
+ * together:
+ *
+ * - The main rpmh-rsc address is the base of a register space that can
+ * be used to find overall configuration of the hardware
+ * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
+ * space are all the TCS blocks. The offset of the TCS blocks is
+ * specified in the device tree by "qcom,tcs-offset" and used to
+ * compute tcs_base.
+ * - TCS blocks come one after another. Type, count, and order are
+ * specified by the device tree as "qcom,tcs-config".
+ * - Each TCS block has some registers, then space for up to 16 commands.
+ * Note that though address space is reserved for 16 commands, fewer
+ * might be present. See ncpt (num cmds per TCS).
+ *
+ * Here's a picture:
+ *
+ * +---------------------------------------------------+
+ * |RSC |
+ * | ctrl |
+ * | |
+ * | Drvs: |
+ * | +-----------------------------------------------+ |
+ * | |DRV0 | |
+ * | | ctrl/config | |
+ * | | IRQ | |
+ * | | | |
+ * | | TCSes: | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS0 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS1 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS2 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | ...... | |
+ * | +-----------------------------------------------+ |
+ * | +-----------------------------------------------+ |
+ * | |DRV1 | |
+ * | | (same as DRV0) | |
+ * | +-----------------------------------------------+ |
+ * | ...... |
+ * +---------------------------------------------------+
+ */
+
+static inline void __iomem *
+tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
- return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}
-static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
+static inline void __iomem *
+tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+{
+ return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
+}
+
+static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id)
+{
+ return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
+{
+ return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
+}
+
+static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id, u32 data)
+{
+ writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
+}
+
+static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
-static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
-{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
-}
-
-static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
+static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
u32 data)
{
- writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
- for (;;) {
- if (data == readl(drv->tcs_base + reg +
- RSC_DRV_TCS_OFFSET * tcs_id))
- break;
+ int i;
+
+ writel(data, tcs_reg_addr(drv, reg, tcs_id));
+
+ /*
+ * Wait until we read back the same value. Use a counter rather than
+ * ktime for timeout since this may be called after timekeeping stops.
+ */
+ for (i = 0; i < USEC_PER_SEC; i++) {
+ if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
+ return;
udelay(1);
}
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
}
+/**
+ * tcs_is_free() - Return if a TCS is totally free.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
+ *
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return: true if the given TCS is free.
+ */
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
- return !test_bit(tcs_id, drv->tcs_in_use) &&
- read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+ return !test_bit(tcs_id, drv->tcs_in_use);
}
-static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
-{
- return &drv->tcs[type];
-}
-
-static int tcs_invalidate(struct rsc_drv *drv, int type)
+/**
+ * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
+ * @drv: The RSC controller.
+ * @type: SLEEP_TCS or WAKE_TCS
+ *
+ * This will clear the "slots" variable of the given tcs_group and also
+ * tell the hardware to forget about all entries.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+static void tcs_invalidate(struct rsc_drv *drv, int type)
{
int m;
- struct tcs_group *tcs;
+ struct tcs_group *tcs = &drv->tcs[type];
- tcs = get_tcs_of_type(drv, type);
-
- spin_lock(&tcs->lock);
- if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
- spin_unlock(&tcs->lock);
- return 0;
- }
+ /* Caller ensures nobody else is running so no lock */
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+ return;
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
- if (!tcs_is_free(drv, m)) {
- spin_unlock(&tcs->lock);
- return -EAGAIN;
- }
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
}
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->lock);
-
- return 0;
}
/**
- * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
+ * @drv: The RSC controller.
*
- * @drv: the RSC controller
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
*/
-int rpmh_rsc_invalidate(struct rsc_drv *drv)
+void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
- int ret;
-
- ret = tcs_invalidate(drv, SLEEP_TCS);
- if (!ret)
- ret = tcs_invalidate(drv, WAKE_TCS);
-
- return ret;
+ tcs_invalidate(drv, SLEEP_TCS);
+ tcs_invalidate(drv, WAKE_TCS);
}
+/**
+ * get_tcs_for_msg() - Get the tcs_group used to send the given message.
+ * @drv: The RSC controller.
+ * @msg: The message we want to send.
+ *
+ * This is normally pretty straightforward except if we are trying to send
+ * an ACTIVE_ONLY message but don't have any active_only TCSes.
+ *
+ * Return: A pointer to a tcs_group or an ERR_PTR.
+ */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
const struct tcs_request *msg)
{
@@ -168,15 +284,33 @@
/*
* If we are making an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purpose a wake TCS to
- * send active votes.
+ * send active votes. This is safe because we ensure any active-only
+ * transfers have finished before we use it (maybe by running from
+ * the last CPU in PM code).
*/
- tcs = get_tcs_of_type(drv, type);
+ tcs = &drv->tcs[type];
if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
- tcs = get_tcs_of_type(drv, WAKE_TCS);
+ tcs = &drv->tcs[WAKE_TCS];
return tcs;
}
+/**
+ * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * For ACTIVE_ONLY transfers we want to call back into the client when the
+ * transfer finishes. To do this we need the "request" that the client
+ * originally provided us. This function grabs the request that we stashed
+ * when we started the transfer.
+ *
+ * This only makes sense for ACTIVE_ONLY transfers since those are the only
+ * ones we track sending (the only ones we enable interrupts for and the only
+ * ones we call back to the client for).
+ *
+ * Return: The stashed request.
+ */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
int tcs_id)
{
@@ -192,6 +326,23 @@
return NULL;
}
+/**
+ * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @trigger: If true then untrigger/retrigger. If false then just untrigger.
+ *
+ * In the normal case we only ever call with "trigger=true" to start a
+ * transfer. That will un-trigger/disable the TCS from the last transfer
+ * then trigger/enable for this transfer.
+ *
+ * If we borrowed a wake TCS for an active-only transfer we'll also call
+ * this function with "trigger=false" to just do the un-trigger/disable
+ * before using the TCS for wake purposes again.
+ *
+ * Note that the AP is only in charge of triggering active-only transfers.
+ * The AP never triggers sleep/wake values using this function.
+ */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
u32 enable;
@@ -201,7 +352,7 @@
* While clearing ensure that the AMC mode trigger is cleared
* and then the mode enable is cleared.
*/
- enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
enable &= ~TCS_AMC_MODE_TRIGGER;
write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
enable &= ~TCS_AMC_MODE_ENABLE;
@@ -216,20 +367,36 @@
}
}
+/**
+ * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @enable: If true then enable; if false then disable
+ *
+ * We only ever call this when we borrow a wake TCS for an active-only
+ * transfer. For active-only TCSes interrupts are always left enabled.
+ */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
u32 data;
- data = read_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, 0);
+ data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
if (enable)
data |= BIT(tcs_id);
else
data &= ~BIT(tcs_id);
- write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, data);
+ writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}
/**
- * tcs_tx_done: TX Done interrupt handler
+ * tcs_tx_done() - TX Done interrupt handler.
+ * @irq: The IRQ number (ignored).
+ * @p: Pointer to "struct rsc_drv".
+ *
+ * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
+ * IRQ for) when a transfer is done.
+ *
+ * Return: IRQ_HANDLED
*/
static irqreturn_t tcs_tx_done(int irq, void *p)
{
@@ -239,7 +406,7 @@
const struct tcs_request *req;
struct tcs_cmd *cmd;
- irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+ irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
req = get_req_from_tcs(drv, i);
@@ -253,7 +420,7 @@
u32 sts;
cmd = &req->cmds[j];
- sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
if (!(sts & CMD_STATUS_ISSUED) ||
((req->wait_for_compl || cmd->wait) &&
!(sts & CMD_STATUS_COMPL))) {
@@ -276,7 +443,7 @@
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
- write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
/*
@@ -287,6 +454,7 @@
if (!drv->tcs[ACTIVE_TCS].num_tcs)
enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
+ wake_up(&drv->tcs_wait);
if (req)
rpmh_tx_done(req, err);
}
@@ -294,6 +462,16 @@
return IRQ_HANDLED;
}
+/**
+ * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @cmd_id: The index within the TCS to start writing.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This is used for all types of transfers (active, sleep, and wake).
+ */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
const struct tcs_request *msg)
{
@@ -307,7 +485,7 @@
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
cmd_msgid |= CMD_MSGID_WRITE;
- cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
@@ -319,14 +497,34 @@
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
- cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
+/**
+ * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
+ * @drv: The controller.
+ * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This will walk through the TCSes in the group and check if any of them
+ * appear to be sending to addresses referenced in the message. If it finds
+ * one it'll return -EBUSY.
+ *
+ * Only for use for active-only transfers.
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: 0 if nothing in flight or -EBUSY if we should try again later.
+ * The caller must re-enable interrupts between tries since that's
+ * the only way tcs_is_free() will ever return true and the only way
+ * RSC_DRV_CMD_ENABLE will ever be cleared.
+ */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
const struct tcs_request *msg)
{
@@ -339,10 +537,10 @@
if (tcs_is_free(drv, tcs_id))
continue;
- curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
- addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
for (k = 0; k < msg->num_cmds; k++) {
if (addr == msg->cmds[k].addr)
return -EBUSY;
@@ -353,6 +551,15 @@
return 0;
}
+/**
+ * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
+ * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
+ * we borrowed it because there are zero active-only ones).
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: The first tcs that's free.
+ */
static int find_free_tcs(struct tcs_group *tcs)
{
int i;
@@ -365,35 +572,75 @@
return -EBUSY;
}
-static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
+/**
+ * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
+ * @drv: The controller.
+ * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The data to be sent.
+ *
+ * Claims a tcs in the given tcs_group while making sure that no existing cmd
+ * is in flight that would conflict with the one in @msg.
+ *
+ * Context: Must be called with the drv->lock held since that protects
+ * tcs_in_use.
+ *
+ * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
+ * or the tcs_group is full.
+ */
+static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
+ const struct tcs_request *msg)
{
- struct tcs_group *tcs;
- int tcs_id;
- unsigned long flags;
int ret;
- tcs = get_tcs_for_msg(drv, msg);
- if (IS_ERR(tcs))
- return PTR_ERR(tcs);
-
- spin_lock_irqsave(&tcs->lock, flags);
- spin_lock(&drv->lock);
/*
* The h/w does not like if we send a request to the same address,
* when one is already in-flight or being processed.
*/
ret = check_for_req_inflight(drv, tcs, msg);
- if (ret) {
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ if (ret)
+ return ret;
- tcs_id = find_free_tcs(tcs);
- if (tcs_id < 0) {
- ret = tcs_id;
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ return find_free_tcs(tcs);
+}
+
+/**
+ * rpmh_rsc_send_data() - Write / trigger active-only message.
+ * @drv: The controller.
+ * @msg: The data to be sent.
+ *
+ * NOTES:
+ * - This is only used for "ACTIVE_ONLY" since the limitations of this
+ * function don't make sense for sleep/wake cases.
+ * - To do the transfer, we will grab a whole TCS for ourselves--we don't
+ * try to share. If there are none available we'll wait indefinitely
+ * for a free one.
+ * - This function will not wait for the commands to be finished, only for
+ * data to be programmed into the RPMh. See rpmh_tx_done() which will
+ * be called when the transfer is fully complete.
+ * - This function must be called with interrupts enabled. If the hardware
+ * is busy doing someone else's transfer we need that transfer to fully
+ * finish so that we can have the hardware, and to fully finish it needs
+ * the interrupt handler to run. If the interrupts is set to run on the
+ * active CPU this can never happen if interrupts are disabled.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
+{
+ struct tcs_group *tcs;
+ int tcs_id;
+ unsigned long flags;
+
+ tcs = get_tcs_for_msg(drv, msg);
+ if (IS_ERR(tcs))
+ return PTR_ERR(tcs);
+
+ spin_lock_irqsave(&drv->lock, flags);
+
+ /* Wait forever for a free tcs. It better be there eventually! */
+ wait_event_lock_irq(drv->tcs_wait,
+ (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
+ drv->lock);
tcs->req[tcs_id - tcs->offset] = msg;
set_bit(tcs_id, drv->tcs_in_use);
@@ -407,85 +654,44 @@
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
- spin_unlock(&drv->lock);
+ spin_unlock_irqrestore(&drv->lock, flags);
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
__tcs_buffer_write(drv, tcs_id, 0, msg);
__tcs_set_trigger(drv, tcs_id, true);
-done_write:
- spin_unlock_irqrestore(&tcs->lock, flags);
- return ret;
+ return 0;
}
/**
- * rpmh_rsc_send_data: Validate the incoming message and write to the
- * appropriate TCS block.
+ * find_slots() - Find a place to write the given message.
+ * @tcs: The tcs group to search.
+ * @msg: The message we want to find room for.
+ * @tcs_id: If we return 0, the global ID of the TCS to write to is
+ * returned here.
+ * @cmd_id: If we return 0, the index into the returned TCS's command
+ * array where the client should start writing the message is
+ * returned here.
*
- * @drv: the controller
- * @msg: the data to be sent
+ * Only for use on sleep/wake TCSes since those are the only ones we maintain
+ * tcs->slots for.
*
- * Return: 0 on success, -EINVAL on error.
- * Note: This call blocks until a valid data is written to the TCS.
+ * Return: -ENOMEM if there was no room, else 0.
*/
-int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
-{
- int ret;
-
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- do {
- ret = tcs_write(drv, msg);
- if (ret == -EBUSY) {
- pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
- msg->cmds[0].addr);
- udelay(10);
- }
- } while (ret == -EBUSY);
-
- return ret;
-}
-
-static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
- int len)
-{
- int i, j;
-
- /* Check for already cached commands */
- for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
- if (tcs->cmd_cache[i] != cmd[0].addr)
- continue;
- if (i + len >= tcs->num_tcs * tcs->ncpt)
- goto seq_err;
- for (j = 0; j < len; j++) {
- if (tcs->cmd_cache[i + j] != cmd[j].addr)
- goto seq_err;
- }
- return i;
- }
-
- return -ENODATA;
-
-seq_err:
- WARN(1, "Message does not match previous sequence.\n");
- return -EINVAL;
-}
-
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
int *tcs_id, int *cmd_id)
{
int slot, offset;
int i = 0;
- /* Find if we already have the msg in our TCS */
- slot = find_match(tcs, msg->cmds, msg->num_cmds);
- if (slot >= 0)
- goto copy_data;
-
- /* Do over, until we can fit the full payload in a TCS */
+ /* Do over, until we can fit the full payload in a single TCS */
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
@@ -494,11 +700,7 @@
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
-copy_data:
bitmap_set(tcs->slots, slot, msg->num_cmds);
- /* Copy the addresses of the resources over to the slots */
- for (i = 0; i < msg->num_cmds; i++)
- tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
offset = slot / tcs->ncpt;
*tcs_id = offset + tcs->offset;
@@ -507,52 +709,157 @@
return 0;
}
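
find_slots() is driven by bitmap_find_next_zero_area(); the surrounding loop merely re-anchors the search at each TCS boundary (i += tcs->ncpt) so a payload never straddles two TCSes. The core allocation step, reduced to a toy bitmap with hypothetical sizes:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define DEMO_SLOTS 16	/* hypothetical: num_tcs * ncpt */

/*
 * With bits 0-2 already set, asking for a run of 3 zero bits starting
 * at position 0 returns 3; the run is then marked used, exactly as
 * find_slots() does with tcs->slots.
 */
static int demo_alloc_run(unsigned long *slots, unsigned int nr)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(slots, DEMO_SLOTS, 0, nr, 0);
	if (start >= DEMO_SLOTS)
		return -ENOMEM;		/* no contiguous run left */

	bitmap_set(slots, start, nr);
	return start;
}
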
-static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+/**
+ * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
+ * @drv: The controller.
+ * @msg: The data to be written to the controller.
+ *
+ * This should only be called for sleep/wake state, never active-only
+ * state.
+ *
+ * The caller must ensure that no other RPMH actions are happening and the
+ * controller is idle when this function is called since it runs lockless.
+ *
+ * Return: 0 if no error; else -error.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id = 0, cmd_id = 0;
- unsigned long flags;
int ret;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&tcs->lock, flags);
/* find the TCS id and the command in the TCS to write to */
ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
if (!ret)
__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
- spin_unlock_irqrestore(&tcs->lock, flags);
return ret;
}
/**
- * rpmh_rsc_write_ctrl_data: Write request to the controller
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ * @drv: The controller
*
- * @drv: the controller
- * @msg: the data to be written to the controller
+ * Checks if any of the AMCs are busy handling ACTIVE sets.
+ * This is called from the last CPU powering down before flushing the
+ * SLEEP and WAKE sets. If any AMC is busy, the controller cannot enter
+ * power collapse, so the last CPU's PM notification is denied.
*
- * There is no response returned for writing the request to the controller.
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return:
+ * * False - AMCs are idle
+ * * True - AMCs are busy
*/
-int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- pr_err("Payload error\n");
- return -EINVAL;
+ int m;
+ struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+
+ /*
+ * If we made an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purposed wake TCSes
+ * should be checked for not busy, because we used wake TCSes for
+ * active requests in this case.
+ */
+ if (!tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m))
+ return true;
}
- /* Data sent to this API will not be sent immediately */
- if (msg->state == RPMH_ACTIVE_ONLY_STATE)
- return -EINVAL;
+ return false;
+}
- return tcs_ctrl_write(drv, msg);
+/**
+ * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the notifier block in struct rsc_drv.
+ * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
+ * @v: Unused
+ *
+ * This function is given to cpu_pm_register_notifier so we can be informed
+ * about when CPUs go down. When all CPUs go down we know no more active
+ * transfers will be started so we write sleep/wake sets. This function gets
+ * called from cpuidle code paths and also at system suspend time.
+ *
+ * If it's the last CPU going down and the AMCs are not busy, this writes
+ * the cached sleep and wake messages to the TCSes. The firmware then takes
+ * care of triggering them when entering the deepest low power modes.
+ *
+ * Return: See cpu_pm_register_notifier()
+ */
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+ int ret = NOTIFY_OK;
+ int cpus_in_pm;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
+ /*
+ * NOTE: comments for num_online_cpus() point out that it's
+ * only a snapshot so we need to be careful. It should be OK
+ * for us to use, though. It's important for us not to miss
+ * if we're the last CPU going down so it would only be a
+ * problem if a CPU went offline right after we did the check
+ * AND that CPU was not idle AND that CPU was the last non-idle
+ * CPU. That can't happen. CPUs would have to come out of idle
+ * before the CPU could go offline.
+ */
+ if (cpus_in_pm < num_online_cpus())
+ return NOTIFY_OK;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ atomic_dec(&drv->cpus_in_pm);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It's likely we're on the last CPU. Grab the drv->lock and write
+ * out the sleep/wake commands to RPMH hardware. Grabbing the lock
+ * means that if we race with another CPU coming up we are still
+ * guaranteed to be safe. If another CPU came up just after we checked
+ * and has grabbed the lock or started an active transfer then we'll
+ * notice we're busy and abort. If another CPU comes up after we start
+ * flushing it will be blocked from starting an active transfer until
+ * we're done flushing. If another CPU starts an active transfer after
+ * we release the lock we're still OK because we're no longer the last
+ * CPU.
+ */
+ if (spin_trylock(&drv->lock)) {
+ if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
+ ret = NOTIFY_BAD;
+ spin_unlock(&drv->lock);
+ } else {
+ /* Another CPU must be up */
+ return NOTIFY_OK;
+ }
+
+ if (ret == NOTIFY_BAD) {
+ /* Double-check if we're here because someone else is up */
+ if (cpus_in_pm < num_online_cpus())
+ ret = NOTIFY_OK;
+ else
+ /* We won't be called w/ CPU_PM_ENTER_FAILED */
+ atomic_dec(&drv->cpus_in_pm);
+ }
+
+ return ret;
}
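
Stripped of the flush logic, the last-CPU bookkeeping above is a counter raced against num_online_cpus(); a minimal sketch with a hypothetical driver struct, registered once via cpu_pm_register_notifier():

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

struct demo_drv {
	struct notifier_block nb;
	atomic_t cpus_in_pm;
};

static int demo_pm_cb(struct notifier_block *nb, unsigned long action,
		      void *unused)
{
	struct demo_drv *d = container_of(nb, struct demo_drv, nb);

	switch (action) {
	case CPU_PM_ENTER:
		/*
		 * Only the CPU whose increment makes the count reach
		 * the online count can be the last one going down.
		 */
		if (atomic_inc_return(&d->cpus_in_pm) < num_online_cpus())
			return NOTIFY_OK;
		/*
		 * Last CPU: flush state here. Returning NOTIFY_BAD
		 * aborts entry, and since the failing notifier is not
		 * replayed with CPU_PM_ENTER_FAILED, the real driver
		 * decrements the counter itself in that case.
		 */
		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&d->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
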
static int rpmh_probe_tcs_config(struct platform_device *pdev,
- struct rsc_drv *drv)
+ struct rsc_drv *drv, void __iomem *base)
{
struct tcs_type_config {
u32 type;
@@ -562,15 +869,6 @@
u32 config, max_tcs, ncpt, offset;
int i, ret, n, st = 0;
struct tcs_group *tcs;
- struct resource *res;
- void __iomem *base;
- char drv_id[10] = {0};
-
- snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
if (ret)
@@ -614,7 +912,6 @@
tcs->type = tcs_cfg[i].type;
tcs->num_tcs = tcs_cfg[i].n;
tcs->ncpt = ncpt;
- spin_lock_init(&tcs->lock);
if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
continue;
@@ -626,19 +923,6 @@
tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
tcs->offset = st;
st += tcs->num_tcs;
-
- /*
- * Allocate memory to cache sleep and wake requests to
- * avoid reading TCS register memory.
- */
- if (tcs->type == ACTIVE_TCS)
- continue;
-
- tcs->cmd_cache = devm_kcalloc(&pdev->dev,
- tcs->num_tcs * ncpt, sizeof(u32),
- GFP_KERNEL);
- if (!tcs->cmd_cache)
- return -ENOMEM;
}
drv->num_tcs = st;
@@ -650,7 +934,11 @@
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
+ struct resource *res;
+ char drv_id[10] = {0};
int ret, irq;
+ u32 solver_config;
+ void __iomem *base;
/*
* Even though RPMh doesn't directly use cmd-db, all of its children
@@ -676,11 +964,18 @@
if (!drv->name)
drv->name = dev_name(&pdev->dev);
- ret = rpmh_probe_tcs_config(pdev, drv);
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = rpmh_probe_tcs_config(pdev, drv, base);
if (ret)
return ret;
spin_lock_init(&drv->lock);
+ init_waitqueue_head(&drv->tcs_wait);
bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
irq = platform_get_irq(pdev, drv->id);
@@ -693,8 +988,22 @@
if (ret)
return ret;
+ /*
+ * CPU PM notifications are not required for controllers that support
+ * 'HW solver' mode, where they can autonomously execute low power
+ * modes to power down.
+ */
+ solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
+ solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+ solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+ if (!solver_config) {
+ drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+ cpu_pm_register_notifier(&drv->rsc_pm);
+ }
+
/* Enable the active TCS to send requests immediately */
- write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+ writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
+ drv->tcs_base + RSC_DRV_IRQ_ENABLE);
spin_lock_init(&drv->client.cache_lock);
INIT_LIST_HEAD(&drv->client.cache);
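
The HW-solver probe in the hunk above is a plain mask-and-shift field read. Assuming the layout used here (a single solver bit, DRV_HW_SOLVER_MASK = 0x1 at DRV_HW_SOLVER_SHIFT = 24), it reduces to:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: one HW-solver bit at bit 24 of DRV_SOLVER_CONFIG. */
#define DEMO_HW_SOLVER_MASK	0x1u
#define DEMO_HW_SOLVER_SHIFT	24

static uint32_t demo_hw_solver(uint32_t solver_config)
{
	solver_config &= DEMO_HW_SOLVER_MASK << DEMO_HW_SOLVER_SHIFT;
	return solver_config >> DEMO_HW_SOLVER_SHIFT;
}

int main(void)
{
	/* 0x01000000: solver mode, so the CPU PM notifier is skipped. */
	printf("%u\n", demo_hw_solver(0x01000000u));
	/* 0x00000000: no solver, so the notifier gets registered. */
	printf("%u\n", demo_hw_solver(0x00000000u));
	return 0;
}
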
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 13e1b45..b61e183 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -23,7 +24,7 @@
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
-#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
+#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
@@ -33,7 +34,7 @@
}, \
.cmd = { { 0 } }, \
.completion = q, \
- .dev = dev, \
+ .dev = device, \
.needs_free = false, \
}
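
The dev -> device rename above is not cosmetic: inside a macro body the identifier after '.' in a designated initializer is an ordinary token, so a parameter named dev also replaces the .dev member designator. Now that rpmh_flush() instantiates the message with a NULL device, the old spelling would expand to '.NULL = NULL'. A standalone demonstration:

#include <stdio.h>

struct req { const char *dev; };

/*
 * BROKEN: the parameter name collides with the member name, so the
 * '.dev' designator is substituted too. DEFINE_REQ_BAD(NULL, r)
 * would expand to: struct req r = { .NULL = NULL };
 */
#define DEFINE_REQ_BAD(dev, name) \
	struct req name = { .dev = dev }

/* OK: only the value position is a parameter occurrence. */
#define DEFINE_REQ_OK(device, name) \
	struct req name = { .dev = device }

int main(void)
{
	DEFINE_REQ_OK(NULL, r);

	printf("%s\n", r.dev ? r.dev : "(null)");
	return 0;
}
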
@@ -297,12 +298,10 @@
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
- unsigned long flags;
int ret = 0;
int i;
/* Send Sleep/Wake requests to the controller, expect no response */
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
@@ -312,7 +311,6 @@
break;
}
}
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return ret;
}
@@ -418,11 +416,10 @@
req->sleep_val != req->wake_val);
}
-static int send_single(const struct device *dev, enum rpmh_state state,
+static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
- DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@@ -434,64 +431,64 @@
}
/**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
*
- * @dev: The device making the request
+ * @ctrlr: Controller making request to flush cached data
*
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
- *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
*/
-int rpmh_flush(const struct device *dev)
+int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
- int ret;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- return 0;
+ goto exit;
}
/* Invalidate the TCSes first to avoid stale data */
- do {
- ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
- } while (ret == -EAGAIN);
- if (ret)
- return ret;
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
- return ret;
+ goto exit;
- /*
- * Nobody else should be calling this function other than system PM,
- * hence we can run without locks.
- */
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
- ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+ ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
+ p->sleep_val);
if (ret)
- return ret;
- ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
- p->addr, p->wake_val);
+ goto exit;
+ ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
+ p->wake_val);
if (ret)
- return ret;
+ goto exit;
}
ctrlr->dirty = false;
- return 0;
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
}
-EXPORT_SYMBOL(rpmh_flush);
/**
* rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
@@ -500,7 +497,7 @@
*
* Invalidate the sleep and wake values in batch_cache.
*/
-int rpmh_invalidate(const struct device *dev)
+void rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
struct batch_cache_req *req, *tmp;
@@ -512,7 +509,5 @@
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index aa24237..436ec79 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
@@ -23,9 +24,13 @@
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
* @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
+ * @corner: current corner
+ * @active_corner: current active corner
+ * @enable_corner: lowest non-zero corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -43,6 +48,7 @@
const bool active_only;
unsigned int corner;
unsigned int active_corner;
+ unsigned int enable_corner;
u32 level[RPMH_ARC_MAX_LEVELS];
size_t level_count;
bool enabled;
@@ -131,10 +137,84 @@
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
+/* SM8150 RPMH powerdomains */
+
+static struct rpmhpd sm8150_mmcx_ao;
+static struct rpmhpd sm8150_mmcx = {
+ .pd = { .name = "mmcx", },
+ .peer = &sm8150_mmcx_ao,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd sm8150_mmcx_ao = {
+ .pd = { .name = "mmcx_ao", },
+ .active_only = true,
+ .peer = &sm8150_mmcx,
+ .res_name = "mmcx.lvl",
+};
+
+static struct rpmhpd *sm8150_rpmhpds[] = {
+ [SM8150_MSS] = &sdm845_mss,
+ [SM8150_EBI] = &sdm845_ebi,
+ [SM8150_LMX] = &sdm845_lmx,
+ [SM8150_LCX] = &sdm845_lcx,
+ [SM8150_GFX] = &sdm845_gfx,
+ [SM8150_MX] = &sdm845_mx,
+ [SM8150_MX_AO] = &sdm845_mx_ao,
+ [SM8150_CX] = &sdm845_cx,
+ [SM8150_CX_AO] = &sdm845_cx_ao,
+ [SM8150_MMCX] = &sm8150_mmcx,
+ [SM8150_MMCX_AO] = &sm8150_mmcx_ao,
+};
+
+static const struct rpmhpd_desc sm8150_desc = {
+ .rpmhpds = sm8150_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8150_rpmhpds),
+};
+
+static struct rpmhpd *sm8250_rpmhpds[] = {
+ [SM8250_CX] = &sdm845_cx,
+ [SM8250_CX_AO] = &sdm845_cx_ao,
+ [SM8250_EBI] = &sdm845_ebi,
+ [SM8250_GFX] = &sdm845_gfx,
+ [SM8250_LCX] = &sdm845_lcx,
+ [SM8250_LMX] = &sdm845_lmx,
+ [SM8250_MMCX] = &sm8150_mmcx,
+ [SM8250_MMCX_AO] = &sm8150_mmcx_ao,
+ [SM8250_MX] = &sdm845_mx,
+ [SM8250_MX_AO] = &sdm845_mx_ao,
+};
+
+static const struct rpmhpd_desc sm8250_desc = {
+ .rpmhpds = sm8250_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8250_rpmhpds),
+};
+
+/* SC7180 RPMH powerdomains */
+static struct rpmhpd *sc7180_rpmhpds[] = {
+ [SC7180_CX] = &sdm845_cx,
+ [SC7180_CX_AO] = &sdm845_cx_ao,
+ [SC7180_GFX] = &sdm845_gfx,
+ [SC7180_MX] = &sdm845_mx,
+ [SC7180_MX_AO] = &sdm845_mx_ao,
+ [SC7180_LMX] = &sdm845_lmx,
+ [SC7180_LCX] = &sdm845_lcx,
+ [SC7180_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sc7180_desc = {
+ .rpmhpds = sc7180_rpmhpds,
+ .num_pds = ARRAY_SIZE(sc7180_rpmhpds),
+};
+
static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
+ { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
unsigned int corner, bool sync)
@@ -217,13 +297,13 @@
static int rpmhpd_power_on(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
- int ret = 0;
+ unsigned int corner;
+ int ret;
mutex_lock(&rpmhpd_lock);
- if (pd->corner)
- ret = rpmhpd_aggregate_corner(pd, pd->corner);
-
+ corner = max(pd->corner, pd->enable_corner);
+ ret = rpmhpd_aggregate_corner(pd, corner);
if (!ret)
pd->enabled = true;
@@ -268,6 +348,10 @@
i--;
if (pd->enabled) {
+ /* Ensure that the domain isn't turned off */
+ if (i < pd->enable_corner)
+ i = pd->enable_corner;
+
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
goto out;
@@ -304,6 +388,10 @@
for (i = 0; i < rpmhpd->level_count; i++) {
rpmhpd->level[i] = buf[i];
+ /* Remember the first corner with non-zero level */
+ if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i])
+ rpmhpd->enable_corner = i;
+
/*
* The AUX data may be zero padded. These 0 valued entries at
* the end of the map must be ignored.
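
The enable_corner scan simply records the first index whose level is non-zero; on a hypothetical ARC level map it behaves like this:

#include <stdio.h>

int main(void)
{
	/* Hypothetical level map read from cmd-db aux data. */
	unsigned int level[] = { 0, 48, 64, 128 };
	unsigned int enable_corner = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/* First non-zero level becomes the lowest corner that
		 * rpmhpd_power_on() may vote for. */
		if (!level[enable_corner] && level[i])
			enable_corner = i;
	}

	printf("enable_corner = %u\n", enable_corner);	/* prints 1 */
	return 0;
}

rpmhpd_power_on() then votes max(pd->corner, pd->enable_corner), so enabling a domain can never result in a zero-level (off) vote.
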
@@ -405,3 +493,6 @@
return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 3c1a55c..f2168e4 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/of.h>
@@ -115,6 +116,28 @@
static DEFINE_MUTEX(rpmpd_lock);
+/* msm8976 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
+
+DEFINE_RPMPD_VFL(msm8976, vddcx_vfl, RWSC, 2);
+DEFINE_RPMPD_VFL(msm8976, vddmx_vfl, RWSM, 6);
+
+static struct rpmpd *msm8976_rpmpds[] = {
+ [MSM8976_VDDCX] = &msm8976_vddcx,
+ [MSM8976_VDDCX_AO] = &msm8976_vddcx_ao,
+ [MSM8976_VDDCX_VFL] = &msm8976_vddcx_vfl,
+ [MSM8976_VDDMX] = &msm8976_vddmx,
+ [MSM8976_VDDMX_AO] = &msm8976_vddmx_ao,
+ [MSM8976_VDDMX_VFL] = &msm8976_vddmx_vfl,
+};
+
+static const struct rpmpd_desc msm8976_desc = {
+ .rpmpds = msm8976_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8976_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_HIGH,
+};
+
/* msm8996 RPM Power domains */
DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
@@ -198,11 +221,13 @@
};
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmpd_match_table);
static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
{
@@ -399,3 +424,6 @@
return platform_driver_register(&rpmpd_driver);
}
core_initcall(rpmpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index fa9dd12..b93218c 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -19,12 +19,15 @@
/**
* struct qcom_smd_rpm - state of the rpm device driver
* @rpm_channel: reference to the smd channel
+ * @icc: interconnect proxy device
+ * @dev: rpm device
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
* @ack_status: result of the rpm request
*/
struct qcom_smd_rpm {
struct rpmsg_endpoint *rpm_channel;
+ struct platform_device *icc;
struct device *dev;
struct completion ack;
@@ -84,6 +87,7 @@
/**
* qcom_rpm_smd_write - write @buf to @type:@id
* @rpm: rpm handle
+ * @state: active/sleep state flags
* @type: resource type
* @id: resource identifier
* @buf: the data to be written
@@ -193,6 +197,7 @@
static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev)
{
struct qcom_smd_rpm *rpm;
+ int ret;
rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
@@ -205,18 +210,34 @@
rpm->rpm_channel = rpdev->ept;
dev_set_drvdata(&rpdev->dev, rpm);
- return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+ rpm->icc = platform_device_register_data(&rpdev->dev, "icc_smd_rpm", -1,
+ NULL, 0);
+ if (IS_ERR(rpm->icc))
+ return PTR_ERR(rpm->icc);
+
+ ret = of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev);
+ if (ret)
+ platform_device_unregister(rpm->icc);
+
+ return ret;
}
static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev)
{
+ struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev);
+
+ platform_device_unregister(rpm->icc);
of_platform_depopulate(&rpdev->dev);
}
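
Registering the interconnect proxy before populating the DT children imposes the usual register-then-unwind ordering; a skeleton of that shape with hypothetical names:

#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static struct platform_device *demo_child;

static int demo_probe(struct platform_device *pdev)
{
	int ret;

	/* Step 1: register the proxy child device. */
	demo_child = platform_device_register_data(&pdev->dev,
						   "demo_child", -1,
						   NULL, 0);
	if (IS_ERR(demo_child))
		return PTR_ERR(demo_child);

	/* Step 2: populate DT children, undoing step 1 on failure. */
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
				   &pdev->dev);
	if (ret)
		platform_device_unregister(demo_child);

	return ret;
}

static int demo_remove(struct platform_device *pdev)
{
	/* Tear down both halves of probe. */
	platform_device_unregister(demo_child);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
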
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
+ { .compatible = "qcom,rpm-ipq6018" },
{ .compatible = "qcom,rpm-msm8916" },
+ { .compatible = "qcom,rpm-msm8936" },
{ .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8976" },
+ { .compatible = "qcom,rpm-msm8994" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
{ .compatible = "qcom,rpm-sdm660" },
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 42e0b8f..a9709aa 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -475,10 +475,8 @@
goto report_read_failure;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ if (irq < 0)
return irq;
- }
smp2p->mbox_client.dev = &pdev->dev;
smp2p->mbox_client.knows_txdone = true;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 176696f..60c82dc 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -24,6 +24,7 @@
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
/*
* SMEM item id, used to acquire handles to respective
@@ -121,6 +122,16 @@
__le32 chip_family;
__le32 raw_device_family;
__le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_defective_parts;
+ __le32 ndefective_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
};
#ifdef CONFIG_DEBUG_FS
@@ -135,6 +146,12 @@
u32 raw_ver;
u32 hw_plat;
u32 fmt;
+ u32 nproduct_id;
+ u32 num_clusters;
+ u32 ncluster_array_offset;
+ u32 num_defective_parts;
+ u32 ndefective_parts_array_offset;
+ u32 nmodem_supported;
};
struct smem_image_version {
@@ -177,6 +194,7 @@
{ 186, "MSM8674" },
{ 194, "MSM8974PRO" },
{ 206, "MSM8916" },
+ { 207, "MSM8994" },
{ 208, "APQ8074-AA" },
{ 209, "APQ8074-AB" },
{ 210, "APQ8074PRO" },
@@ -188,16 +206,28 @@
{ 216, "MSM8674PRO" },
{ 217, "MSM8974-AA" },
{ 218, "MSM8974-AB" },
+ { 233, "MSM8936" },
+ { 239, "MSM8939" },
+ { 240, "APQ8036" },
+ { 241, "APQ8039" },
{ 246, "MSM8996" },
{ 247, "APQ8016" },
{ 248, "MSM8216" },
{ 249, "MSM8116" },
{ 250, "MSM8616" },
+ { 251, "MSM8992" },
+ { 253, "APQ8094" },
{ 291, "APQ8096" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
+ { 318, "SDM630" },
+ { 321, "SDM845" },
+ { 341, "SDA845" },
+ { 356, "SM8250" },
+ { 402, "IPQ6018" },
+ { 425, "SC7180" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
@@ -250,7 +280,10 @@
if (model < 0)
return -EINVAL;
- seq_printf(seq, "%s\n", pmic_models[model]);
+ if (model < ARRAY_SIZE(pmic_models) && pmic_models[model])
+ seq_printf(seq, "%s\n", pmic_models[model]);
+ else
+ seq_printf(seq, "unknown (%d)\n", model);
return 0;
}
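
The guard added above is the standard pattern for a sparse, designated-initializer ID table: check the bound first, then the hole. In isolation, with a hypothetical table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Sparse table: index 1 is a hole left by designated initializers. */
static const char *const models[] = {
	[0] = "PMIC-A",
	[2] = "PMIC-C",
};

static const char *model_name(unsigned int id)
{
	if (id < ARRAY_SIZE(models) && models[id])
		return models[id];
	return "unknown";
}

int main(void)
{
	/* Prints: PMIC-A unknown unknown */
	printf("%s %s %s\n", model_name(0), model_name(1),
	       model_name(9));
	return 0;
}
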
@@ -266,16 +299,26 @@
return 0;
}
+static int qcom_show_chip_id(struct seq_file *seq, void *p)
+{
+ struct socinfo *socinfo = seq->private;
+
+ seq_printf(seq, "%s\n", socinfo->chip_id);
+
+ return 0;
+}
+
QCOM_OPEN(build_id, qcom_show_build_id);
QCOM_OPEN(pmic_model, qcom_show_pmic_model);
QCOM_OPEN(pmic_die_rev, qcom_show_pmic_die_revision);
+QCOM_OPEN(chip_id, qcom_show_chip_id);
#define DEFINE_IMAGE_OPS(type) \
static int show_image_##type(struct seq_file *seq, void *p) \
{ \
struct smem_image_version *image_version = seq->private; \
seq_puts(seq, image_version->type); \
- seq_puts(seq, "\n"); \
+ seq_putc(seq, '\n'); \
return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \
@@ -306,7 +349,38 @@
qcom_socinfo->info.fmt = __le32_to_cpu(info->fmt);
+ debugfs_create_x32("info_fmt", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.fmt);
+
switch (qcom_socinfo->info.fmt) {
+ case SOCINFO_VERSION(0, 15):
+ qcom_socinfo->info.nmodem_supported = __le32_to_cpu(info->nmodem_supported);
+
+ debugfs_create_u32("nmodem_supported", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nmodem_supported);
+ fallthrough;
+ case SOCINFO_VERSION(0, 14):
+ qcom_socinfo->info.num_clusters = __le32_to_cpu(info->num_clusters);
+ qcom_socinfo->info.ncluster_array_offset = __le32_to_cpu(info->ncluster_array_offset);
+ qcom_socinfo->info.num_defective_parts = __le32_to_cpu(info->num_defective_parts);
+ qcom_socinfo->info.ndefective_parts_array_offset = __le32_to_cpu(info->ndefective_parts_array_offset);
+
+ debugfs_create_u32("num_clusters", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_clusters);
+ debugfs_create_u32("ncluster_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ncluster_array_offset);
+ debugfs_create_u32("num_defective_parts", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.num_defective_parts);
+ debugfs_create_u32("ndefective_parts_array_offset", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.ndefective_parts_array_offset);
+ fallthrough;
+ case SOCINFO_VERSION(0, 13):
+ qcom_socinfo->info.nproduct_id = __le32_to_cpu(info->nproduct_id);
+
+ debugfs_create_u32("nproduct_id", 0400, qcom_socinfo->dbg_root,
+ &qcom_socinfo->info.nproduct_id);
+ DEBUGFS_ADD(info, chip_id);
+ fallthrough;
case SOCINFO_VERSION(0, 12):
qcom_socinfo->info.chip_family =
__le32_to_cpu(info->chip_family);
@@ -323,7 +397,7 @@
debugfs_create_x32("raw_device_number", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_device_num);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 11):
case SOCINFO_VERSION(0, 10):
case SOCINFO_VERSION(0, 9):
@@ -331,12 +405,12 @@
debugfs_create_u32("foundry_id", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.foundry_id);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 8):
case SOCINFO_VERSION(0, 7):
DEBUGFS_ADD(info, pmic_model);
DEBUGFS_ADD(info, pmic_die_rev);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 6):
qcom_socinfo->info.hw_plat_subtype =
__le32_to_cpu(info->hw_plat_subtype);
@@ -344,7 +418,7 @@
debugfs_create_u32("hardware_platform_subtype", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat_subtype);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 5):
qcom_socinfo->info.accessory_chip =
__le32_to_cpu(info->accessory_chip);
@@ -352,27 +426,27 @@
debugfs_create_u32("accessory_chip", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.accessory_chip);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 4):
qcom_socinfo->info.plat_ver = __le32_to_cpu(info->plat_ver);
debugfs_create_u32("platform_version", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.plat_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 3):
qcom_socinfo->info.hw_plat = __le32_to_cpu(info->hw_plat);
debugfs_create_u32("hardware_platform", 0400,
qcom_socinfo->dbg_root,
&qcom_socinfo->info.hw_plat);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 2):
qcom_socinfo->info.raw_ver = __le32_to_cpu(info->raw_ver);
debugfs_create_u32("raw_version", 0400, qcom_socinfo->dbg_root,
&qcom_socinfo->info.raw_ver);
- /* Fall through */
+ fallthrough;
case SOCINFO_VERSION(0, 1):
DEBUGFS_ADD(info, build_id);
break;
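
The descending switch with fallthrough is the idiom for a versioned, append-only struct: matching the newest format exposes its fields and then falls through every older case, so each revision adds exactly one case at the top. A compact model with hypothetical versions:

#include <stdio.h>

/* v1 defined a; v2 appended b; v3 appended c. */
struct demo_info { int a, b, c; };

static void demo_dump(const struct demo_info *info, int fmt)
{
	switch (fmt) {
	case 3:
		printf("c=%d\n", info->c);
		/* fallthrough */
	case 2:
		printf("b=%d\n", info->b);
		/* fallthrough */
	case 1:
		printf("a=%d\n", info->a);
		break;
	default:
		printf("unknown format %d\n", fmt);
	}
}

int main(void)
{
	struct demo_info info = { 1, 2, 3 };

	demo_dump(&info, 2);	/* prints b=2 then a=1 */
	return 0;
}
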
@@ -447,7 +521,7 @@
/* Feed the soc specific unique data into entropy pool */
add_device_randomness(info, item_size);
- platform_set_drvdata(pdev, qs->soc_dev);
+ platform_set_drvdata(pdev, qs);
return 0;
}
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
deleted file mode 100644
index 8e10e02..0000000
--- a/drivers/soc/qcom/spm.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
- * Copyright (c) 2014,2015, Linaro Ltd.
- *
- * SAW power controller driver
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/cpuidle.h>
-#include <linux/cpu_pm.h>
-#include <linux/qcom_scm.h>
-
-#include <asm/cpuidle.h>
-#include <asm/proc-fns.h>
-#include <asm/suspend.h>
-
-#define MAX_PMIC_DATA 2
-#define MAX_SEQ_DATA 64
-#define SPM_CTL_INDEX 0x7f
-#define SPM_CTL_INDEX_SHIFT 4
-#define SPM_CTL_EN BIT(0)
-
-enum pm_sleep_mode {
- PM_SLEEP_MODE_STBY,
- PM_SLEEP_MODE_RET,
- PM_SLEEP_MODE_SPC,
- PM_SLEEP_MODE_PC,
- PM_SLEEP_MODE_NR,
-};
-
-enum spm_reg {
- SPM_REG_CFG,
- SPM_REG_SPM_CTL,
- SPM_REG_DLY,
- SPM_REG_PMIC_DLY,
- SPM_REG_PMIC_DATA_0,
- SPM_REG_PMIC_DATA_1,
- SPM_REG_VCTL,
- SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
- SPM_REG_PMIC_STS,
- SPM_REG_NR,
-};
-
-struct spm_reg_data {
- const u8 *reg_offset;
- u32 spm_cfg;
- u32 spm_dly;
- u32 pmic_dly;
- u32 pmic_data[MAX_PMIC_DATA];
- u8 seq[MAX_SEQ_DATA];
- u8 start_index[PM_SLEEP_MODE_NR];
-};
-
-struct spm_driver_data {
- void __iomem *reg_base;
- const struct spm_reg_data *reg_data;
-};
-
-static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x30,
- [SPM_REG_DLY] = 0x34,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8974, 8084 */
-static const struct spm_reg_data spm_reg_8974_8084_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x1,
- .spm_dly = 0x3C102800,
- .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
- 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
- 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 3,
-};
-
-static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x20,
- [SPM_REG_PMIC_DLY] = 0x24,
- [SPM_REG_PMIC_DATA_0] = 0x28,
- [SPM_REG_PMIC_DATA_1] = 0x2C,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8064 */
-static const struct spm_reg_data spm_reg_8064_cpu = {
- .reg_offset = spm_reg_offset_v1_1,
- .spm_cfg = 0x1F,
- .pmic_dly = 0x02020004,
- .pmic_data[0] = 0x0084009C,
- .pmic_data[1] = 0x00A4001C,
- .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
- 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 2,
-};
-
-static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);
-
-typedef int (*idle_fn)(void);
-static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);
-
-static inline void spm_register_write(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- if (drv->reg_data->reg_offset[reg])
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
-}
-
-/* Ensure a guaranteed write, before return */
-static inline void spm_register_write_sync(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- u32 ret;
-
- if (!drv->reg_data->reg_offset[reg])
- return;
-
- do {
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- ret = readl_relaxed(drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- if (ret == val)
- break;
- cpu_relax();
- } while (1);
-}
-
-static inline u32 spm_register_read(struct spm_driver_data *drv,
- enum spm_reg reg)
-{
- return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
-}
-
-static void spm_set_low_power_mode(struct spm_driver_data *drv,
- enum pm_sleep_mode mode)
-{
- u32 start_index;
- u32 ctl_val;
-
- start_index = drv->reg_data->start_index[mode];
-
- ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
- ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
- ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
- ctl_val |= SPM_CTL_EN;
- spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
-}
-
-static int qcom_pm_collapse(unsigned long int unused)
-{
- qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
-
- /*
- * Returns here only if there was a pending interrupt and we did not
- * power down as a result.
- */
- return -1;
-}
-
-static int qcom_cpu_spc(void)
-{
- int ret;
- struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);
-
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
- ret = cpu_suspend(0, qcom_pm_collapse);
- /*
- * ARM common code executes WFI without calling into our driver and
- * if the SPM mode is not reset, then we may accidently power down the
- * cpu when we intended only to gate the cpu clock.
- * Ensure the state is set to standby before returning.
- */
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
-
- return ret;
-}
-
-static int qcom_idle_enter(unsigned long index)
-{
- return __this_cpu_read(qcom_idle_ops)[index]();
-}
-
-static const struct of_device_id qcom_idle_state_match[] __initconst = {
- { .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
- { },
-};
-
-static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
-{
- const struct of_device_id *match_id;
- struct device_node *state_node;
- int i;
- int state_count = 1;
- idle_fn idle_fns[CPUIDLE_STATE_MAX];
- idle_fn *fns;
- cpumask_t mask;
- bool use_scm_power_down = false;
-
- if (!qcom_scm_is_available())
- return -EPROBE_DEFER;
-
- for (i = 0; ; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
- if (!state_node)
- break;
-
- if (!of_device_is_available(state_node))
- continue;
-
- if (i == CPUIDLE_STATE_MAX) {
- pr_warn("%s: cpuidle states reached max possible\n",
- __func__);
- break;
- }
-
- match_id = of_match_node(qcom_idle_state_match, state_node);
- if (!match_id)
- return -ENODEV;
-
- idle_fns[state_count] = match_id->data;
-
- /* Check if any of the states allow power down */
- if (match_id->data == qcom_cpu_spc)
- use_scm_power_down = true;
-
- state_count++;
- }
-
- if (state_count == 1)
- goto check_spm;
-
- fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
- GFP_KERNEL);
- if (!fns)
- return -ENOMEM;
-
- for (i = 1; i < state_count; i++)
- fns[i] = idle_fns[i];
-
- if (use_scm_power_down) {
- /* We have atleast one power down mode */
- cpumask_clear(&mask);
- cpumask_set_cpu(cpu, &mask);
- qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
- }
-
- per_cpu(qcom_idle_ops, cpu) = fns;
-
- /*
- * SPM probe for the cpu should have happened by now, if the
- * SPM device does not exist, return -ENXIO to indicate that the
- * cpu does not support idle states.
- */
-check_spm:
- return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
-}
-
-static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
- .suspend = qcom_idle_enter,
- .init = qcom_cpuidle_init,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
-CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
-
-static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
- int *spm_cpu)
-{
- struct spm_driver_data *drv = NULL;
- struct device_node *cpu_node, *saw_node;
- int cpu;
- bool found = 0;
-
- for_each_possible_cpu(cpu) {
- cpu_node = of_cpu_device_node_get(cpu);
- if (!cpu_node)
- continue;
- saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
- found = (saw_node == pdev->dev.of_node);
- of_node_put(saw_node);
- of_node_put(cpu_node);
- if (found)
- break;
- }
-
- if (found) {
- drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
- if (drv)
- *spm_cpu = cpu;
- }
-
- return drv;
-}
-
-static const struct of_device_id spm_match_table[] = {
- { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
- .data = &spm_reg_8064_cpu },
- { },
-};
-
-static int spm_dev_probe(struct platform_device *pdev)
-{
- struct spm_driver_data *drv;
- struct resource *res;
- const struct of_device_id *match_id;
- void __iomem *addr;
- int cpu;
-
- drv = spm_get_drv(pdev, &cpu);
- if (!drv)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drv->reg_base))
- return PTR_ERR(drv->reg_base);
-
- match_id = of_match_node(spm_match_table, pdev->dev.of_node);
- if (!match_id)
- return -ENODEV;
-
- drv->reg_data = match_id->data;
-
- /* Write the SPM sequences first.. */
- addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
- __iowrite32_copy(addr, drv->reg_data->seq,
- ARRAY_SIZE(drv->reg_data->seq) / 4);
-
- /*
- * ..and then the control registers.
- * On some SoC if the control registers are written first and if the
- * CPU was held in reset, the reset signal could trigger the SPM state
- * machine, before the sequences are completely written.
- */
- spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
- spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
- spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
- spm_register_write(drv, SPM_REG_PMIC_DATA_0,
- drv->reg_data->pmic_data[0]);
- spm_register_write(drv, SPM_REG_PMIC_DATA_1,
- drv->reg_data->pmic_data[1]);
-
- /* Set up Standby as the default low power mode */
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
-
- per_cpu(cpu_spm_drv, cpu) = drv;
-
- return 0;
-}
-
-static struct platform_driver spm_driver = {
- .probe = spm_dev_probe,
- .driver = {
- .name = "saw",
- .of_match_table = spm_match_table,
- },
-};
-
-builtin_platform_driver(spm_driver);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 3c5e017..b70bbc3 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-config SOC_RENESAS
+menuconfig SOC_RENESAS
bool "Renesas SoC driver support" if COMPILE_TEST && !ARCH_RENESAS
default y if ARCH_RENESAS
select SOC_BUS
@@ -49,12 +49,74 @@
#comment "Renesas ARM SoCs System Type"
config ARCH_EMEV2
- bool "Emma Mobile EV2"
+ bool "ARM32 Platform support for Emma Mobile EV2"
select HAVE_ARM_SCU if SMP
select SYS_SUPPORTS_EM_STI
+config ARCH_R8A7794
+ bool "ARM32 Platform support for R-Car E2"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7794
+
+config ARCH_R8A7779
+ bool "ARM32 Platform support for R-Car H1"
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select SYSC_R8A7779
+
+config ARCH_R8A7790
+ bool "ARM32 Platform support for R-Car H2"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select I2C
+ select SYSC_R8A7790
+
+config ARCH_R8A7778
+ bool "ARM32 Platform support for R-Car M1A"
+ select ARCH_RCAR_GEN1
+ select ARM_ERRATA_754322
+
+config ARCH_R8A7793
+ bool "ARM32 Platform support for R-Car M2-N"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select I2C
+ select SYSC_R8A7791
+
+config ARCH_R8A7791
+ bool "ARM32 Platform support for R-Car M2-W"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select I2C
+ select SYSC_R8A7791
+
+config ARCH_R8A7792
+ bool "ARM32 Platform support for R-Car V2H"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select SYSC_R8A7792
+
+config ARCH_R8A7740
+ bool "ARM32 Platform support for R-Mobile A1"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_754322
+ select RENESAS_INTC_IRQPIN
+
+config ARCH_R8A73A4
+ bool "ARM32 Platform support for R-Mobile APE6"
+ select ARCH_RMOBILE
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select HAVE_ARM_ARCH_TIMER
+ select RENESAS_IRQC
+
config ARCH_R7S72100
- bool "RZ/A1H (R7S72100)"
+ bool "ARM32 Platform support for RZ/A1H"
select ARM_ERRATA_754322
select PM
select PM_GENERIC_DOMAINS
@@ -63,106 +125,53 @@
select SYS_SUPPORTS_SH_MTU2
config ARCH_R7S9210
- bool "RZ/A2 (R7S9210)"
+ bool "ARM32 Platform support for RZ/A2"
select PM
select PM_GENERIC_DOMAINS
select RENESAS_OSTM
select RENESAS_RZA1_IRQC
-config ARCH_R8A73A4
- bool "R-Mobile APE6 (R8A73A40)"
- select ARCH_RMOBILE
+config ARCH_R8A77470
+ bool "ARM32 Platform support for RZ/G1C"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A77470
+
+config ARCH_R8A7745
+ bool "ARM32 Platform support for RZ/G1E"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_814220
+ select SYSC_R8A7745
+
+config ARCH_R8A7742
+ bool "ARM32 Platform support for RZ/G1H"
+ select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select ARM_ERRATA_814220
- select HAVE_ARM_ARCH_TIMER
- select RENESAS_IRQC
-
-config ARCH_R8A7740
- bool "R-Mobile A1 (R8A77400)"
- select ARCH_RMOBILE
- select ARM_ERRATA_754322
- select RENESAS_INTC_IRQPIN
+ select SYSC_R8A7742
config ARCH_R8A7743
- bool "RZ/G1M (R8A77430)"
+ bool "ARM32 Platform support for RZ/G1M"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select SYSC_R8A7743
config ARCH_R8A7744
- bool "RZ/G1N (R8A77440)"
+ bool "ARM32 Platform support for RZ/G1N"
select ARCH_RCAR_GEN2
select ARM_ERRATA_798181 if SMP
select SYSC_R8A7743
-config ARCH_R8A7745
- bool "RZ/G1E (R8A77450)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A7745
-
-config ARCH_R8A77470
- bool "RZ/G1C (R8A77470)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A77470
-
-config ARCH_R8A7778
- bool "R-Car M1A (R8A77781)"
- select ARCH_RCAR_GEN1
- select ARM_ERRATA_754322
-
-config ARCH_R8A7779
- bool "R-Car H1 (R8A77790)"
- select ARCH_RCAR_GEN1
- select ARM_ERRATA_754322
- select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if SMP
- select SYSC_R8A7779
-
-config ARCH_R8A7790
- bool "R-Car H2 (R8A77900)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
- select ARM_ERRATA_814220
- select I2C
- select SYSC_R8A7790
-
-config ARCH_R8A7791
- bool "R-Car M2-W (R8A77910)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
-
-config ARCH_R8A7792
- bool "R-Car V2H (R8A77920)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
- select SYSC_R8A7792
-
-config ARCH_R8A7793
- bool "R-Car M2-N (R8A7793)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_798181 if SMP
- select I2C
- select SYSC_R8A7791
-
-config ARCH_R8A7794
- bool "R-Car E2 (R8A77940)"
- select ARCH_RCAR_GEN2
- select ARM_ERRATA_814220
- select SYSC_R8A7794
-
config ARCH_R9A06G032
- bool "RZ/N1D (R9A06G032)"
+ bool "ARM32 Platform support for RZ/N1D"
select ARCH_RZN1
select ARM_ERRATA_814220
config ARCH_SH73A0
- bool "SH-Mobile AG5 (R8A73A00)"
+ bool "ARM32 Platform support for SH-Mobile AG5"
select ARCH_RMOBILE
select ARM_ERRATA_754322
+ select ARM_GLOBAL_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select RENESAS_INTC_IRQPIN
@@ -171,148 +180,201 @@
if ARM64
-config ARCH_R8A774A1
- bool "Renesas RZ/G2M SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774A1
- help
- This enables support for the Renesas RZ/G2M SoC.
-
-config ARCH_R8A774C0
- bool "Renesas RZ/G2E SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A774C0
- help
- This enables support for the Renesas RZ/G2E SoC.
-
-config ARCH_R8A7795
- bool "Renesas R-Car H3 SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A7795
- help
- This enables support for the Renesas R-Car H3 SoC.
-
-config ARCH_R8A7796
- bool "Renesas R-Car M3-W SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A7796
- help
- This enables support for the Renesas R-Car M3-W SoC.
-
-config ARCH_R8A77965
- bool "Renesas R-Car M3-N SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A77965
- help
- This enables support for the Renesas R-Car M3-N SoC.
-
-config ARCH_R8A77970
- bool "Renesas R-Car V3M SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A77970
- help
- This enables support for the Renesas R-Car V3M SoC.
-
-config ARCH_R8A77980
- bool "Renesas R-Car V3H SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A77980
- help
- This enables support for the Renesas R-Car V3H SoC.
-
-config ARCH_R8A77990
- bool "Renesas R-Car E3 SoC Platform"
- select ARCH_RCAR_GEN3
- select SYSC_R8A77990
- help
- This enables support for the Renesas R-Car E3 SoC.
-
config ARCH_R8A77995
- bool "Renesas R-Car D3 SoC Platform"
+ bool "ARM64 Platform support for R-Car D3"
select ARCH_RCAR_GEN3
select SYSC_R8A77995
help
This enables support for the Renesas R-Car D3 SoC.
+config ARCH_R8A77990
+ bool "ARM64 Platform support for R-Car E3"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77990
+ help
+ This enables support for the Renesas R-Car E3 SoC.
+
+config ARCH_R8A77950
+ bool "ARM64 Platform support for R-Car H3 ES1.x"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A7795
+ help
+ This enables support for the Renesas R-Car H3 SoC (revision 1.x).
+
+config ARCH_R8A77951
+ bool "ARM64 Platform support for R-Car H3 ES2.0+"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A7795
+ help
+ This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
+ later).
+
+config ARCH_R8A77965
+ bool "ARM64 Platform support for R-Car M3-N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77965
+ help
+ This enables support for the Renesas R-Car M3-N SoC.
+
+config ARCH_R8A77960
+ bool "ARM64 Platform support for R-Car M3-W"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77960
+ help
+ This enables support for the Renesas R-Car M3-W SoC.
+
+config ARCH_R8A77961
+ bool "ARM64 Platform support for R-Car M3-W+"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77961
+ help
+ This enables support for the Renesas R-Car M3-W+ SoC.
+
+config ARCH_R8A77980
+ bool "ARM64 Platform support for R-Car V3H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77980
+ help
+ This enables support for the Renesas R-Car V3H SoC.
+
+config ARCH_R8A77970
+ bool "ARM64 Platform support for R-Car V3M"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A77970
+ help
+ This enables support for the Renesas R-Car V3M SoC.
+
+config ARCH_R8A779A0
+ bool "ARM64 Platform support for R-Car V3U"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779A0
+ help
+ This enables support for the Renesas R-Car V3U SoC.
+
+config ARCH_R8A774C0
+ bool "ARM64 Platform support for RZ/G2E"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774C0
+ help
+ This enables support for the Renesas RZ/G2E SoC.
+
+config ARCH_R8A774E1
+ bool "ARM64 Platform support for RZ/G2H"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774E1
+ help
+ This enables support for the Renesas RZ/G2H SoC.
+
+config ARCH_R8A774A1
+ bool "ARM64 Platform support for RZ/G2M"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774A1
+ help
+ This enables support for the Renesas RZ/G2M SoC.
+
+config ARCH_R8A774B1
+ bool "ARM64 Platform support for RZ/G2N"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A774B1
+ help
+ This enables support for the Renesas RZ/G2N SoC.
+
endif # ARM64
-# SoC
-config SYSC_R8A7743
- bool "RZ/G1M System Controller support" if COMPILE_TEST
- select SYSC_RCAR
+config RST_RCAR
+ bool "Reset Controller support for R-Car" if COMPILE_TEST
-config SYSC_R8A7745
- bool "RZ/G1E System Controller support" if COMPILE_TEST
- select SYSC_RCAR
+config SYSC_RCAR
+ bool "System Controller support for R-Car" if COMPILE_TEST
-config SYSC_R8A77470
- bool "RZ/G1C System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A774A1
- bool "RZ/G2M System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A774C0
- bool "RZ/G2E System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7779
- bool "R-Car H1 System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7790
- bool "R-Car H2 System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7791
- bool "R-Car M2-W/N System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7792
- bool "R-Car V2H System Controller support" if COMPILE_TEST
+config SYSC_R8A77995
+ bool "System Controller support for R-Car D3" if COMPILE_TEST
select SYSC_RCAR
config SYSC_R8A7794
- bool "R-Car E2 System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7795
- bool "R-Car H3 System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A7796
- bool "R-Car M3-W System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77965
- bool "R-Car M3-N System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77970
- bool "R-Car V3M System Controller support" if COMPILE_TEST
- select SYSC_RCAR
-
-config SYSC_R8A77980
- bool "R-Car V3H System Controller support" if COMPILE_TEST
+ bool "System Controller support for R-Car E2" if COMPILE_TEST
select SYSC_RCAR
config SYSC_R8A77990
- bool "R-Car E3 System Controller support" if COMPILE_TEST
+ bool "System Controller support for R-Car E3" if COMPILE_TEST
select SYSC_RCAR
-config SYSC_R8A77995
- bool "R-Car D3 System Controller support" if COMPILE_TEST
+config SYSC_R8A7779
+ bool "System Controller support for R-Car H1" if COMPILE_TEST
select SYSC_RCAR
-# Family
-config RST_RCAR
- bool "R-Car Reset Controller support" if COMPILE_TEST
+config SYSC_R8A7790
+ bool "System Controller support for R-Car H2" if COMPILE_TEST
+ select SYSC_RCAR
-config SYSC_RCAR
- bool "R-Car System Controller support" if COMPILE_TEST
+config SYSC_R8A7795
+ bool "System Controller support for R-Car H3" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7791
+ bool "System Controller support for R-Car M2-W/N" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77965
+ bool "System Controller support for R-Car M3-N" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77960
+ bool "System Controller support for R-Car M3-W" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77961
+ bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7792
+ bool "System Controller support for R-Car V2H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77980
+ bool "System Controller support for R-Car V3H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A77970
+ bool "System Controller support for R-Car V3M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A779A0
+ bool "System Controller support for R-Car V3U" if COMPILE_TEST
config SYSC_RMOBILE
- bool "R-Mobile System Controller support" if COMPILE_TEST
+ bool "System Controller support for R-Mobile" if COMPILE_TEST
+
+config SYSC_R8A77470
+ bool "System Controller support for RZ/G1C" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7745
+ bool "System Controller support for RZ/G1E" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7742
+ bool "System Controller support for RZ/G1H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A7743
+ bool "System Controller support for RZ/G1M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774C0
+ bool "System Controller support for RZ/G2E" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774E1
+ bool "System Controller support for RZ/G2H" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774A1
+ bool "System Controller support for RZ/G2M" if COMPILE_TEST
+ select SYSC_RCAR
+
+config SYSC_R8A774B1
+ bool "System Controller support for RZ/G2N" if COMPILE_TEST
+ select SYSC_RCAR
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 00764d5..9b29bed 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -3,23 +3,28 @@
obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
# SoC
+obj-$(CONFIG_SYSC_R8A7742) += r8a7742-sysc.o
obj-$(CONFIG_SYSC_R8A7743) += r8a7743-sysc.o
obj-$(CONFIG_SYSC_R8A7745) += r8a7745-sysc.o
obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
obj-$(CONFIG_SYSC_R8A774A1) += r8a774a1-sysc.o
+obj-$(CONFIG_SYSC_R8A774B1) += r8a774b1-sysc.o
obj-$(CONFIG_SYSC_R8A774C0) += r8a774c0-sysc.o
+obj-$(CONFIG_SYSC_R8A774E1) += r8a774e1-sysc.o
obj-$(CONFIG_SYSC_R8A7779) += r8a7779-sysc.o
obj-$(CONFIG_SYSC_R8A7790) += r8a7790-sysc.o
obj-$(CONFIG_SYSC_R8A7791) += r8a7791-sysc.o
obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
-obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77960) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77961) += r8a7796-sysc.o
obj-$(CONFIG_SYSC_R8A77965) += r8a77965-sysc.o
obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
+obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
diff --git a/drivers/soc/renesas/r8a7742-sysc.c b/drivers/soc/renesas/r8a7742-sysc.c
new file mode 100644
index 0000000..219a675
--- /dev/null
+++ b/drivers/soc/renesas/r8a7742-sysc.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G1H System Controller
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7742-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7742_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7742_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7742_PD_CA15_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7742_PD_CA15_CPU0, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7742_PD_CA15_CPU1, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu2", 0x40, 2, R8A7742_PD_CA15_CPU2, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu3", 0x40, 3, R8A7742_PD_CA15_CPU3, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca7-scu", 0x100, 0, R8A7742_PD_CA7_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7742_PD_CA7_CPU0, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7742_PD_CA7_CPU1, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu2", 0x1c0, 2, R8A7742_PD_CA7_CPU2, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu3", 0x1c0, 3, R8A7742_PD_CA7_CPU3, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "rgx", 0xc0, 0, R8A7742_PD_RGX, R8A7742_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7742_sysc_info __initconst = {
+ .areas = r8a7742_areas,
+ .num_areas = ARRAY_SIZE(r8a7742_areas),
+};
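
The positional initializers in tables like r8a7742_areas[] are compact but opaque without the header beside them. For orientation, the fields they fill are declared in rcar-sysc.h roughly as follows (a sketch of the assumed layout; the header itself stays authoritative):

struct rcar_sysc_area {
	const char *name;
	u16 chan_offs;		/* Offset of PWRSR register for this area */
	u8 chan_bit;		/* Bit in PWR* (except for PWRUP in PWRSR) */
	u8 isr_bit;		/* Bit in SYSCI*R */
	s8 parent;		/* -1 if none */
	u8 flags;		/* See PD_* */
};

Read the "ca15-scu" entry above as: PWR* register block at 0x180, channel bit 0, interrupt bit R8A7742_PD_CA15_SCU, parent R8A7742_PD_ALWAYS_ON, flagged PD_SCU.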
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
index edf6436..4e2c0ab 100644
--- a/drivers/soc/renesas/r8a7743-sysc.c
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7743-sysc.h>
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
index 65dc6b0..865821a 100644
--- a/drivers/soc/renesas/r8a7745-sysc.c
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7745-sysc.h>
diff --git a/drivers/soc/renesas/r8a77470-sysc.c b/drivers/soc/renesas/r8a77470-sysc.c
index cfa015e..1eeb801 100644
--- a/drivers/soc/renesas/r8a77470-sysc.c
+++ b/drivers/soc/renesas/r8a77470-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2018 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77470-sysc.h>
diff --git a/drivers/soc/renesas/r8a774a1-sysc.c b/drivers/soc/renesas/r8a774a1-sysc.c
index 9db51ff..38ac2c6 100644
--- a/drivers/soc/renesas/r8a774a1-sysc.c
+++ b/drivers/soc/renesas/r8a774a1-sysc.c
@@ -7,7 +7,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a774a1-sysc.h>
diff --git a/drivers/soc/renesas/r8a774b1-sysc.c b/drivers/soc/renesas/r8a774b1-sysc.c
new file mode 100644
index 0000000..5f97ff2
--- /dev/null
+++ b/drivers/soc/renesas/r8a774b1-sysc.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2N System Controller
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car M3-W System Controller
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a774b1-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a774b1_areas[] __initconst = {
+ { "always-on", 0, 0, R8A774B1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A774B1_PD_CA57_SCU, R8A774B1_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A774B1_PD_CA57_CPU0, R8A774B1_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A774B1_PD_CA57_CPU1, R8A774B1_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "a3vc", 0x380, 0, R8A774B1_PD_A3VC, R8A774B1_PD_ALWAYS_ON },
+ { "a3vp", 0x340, 0, R8A774B1_PD_A3VP, R8A774B1_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A774B1_PD_A2VC1, R8A774B1_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A774B1_PD_3DG_A, R8A774B1_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A774B1_PD_3DG_B, R8A774B1_PD_3DG_A },
+};
+
+const struct rcar_sysc_info r8a774b1_sysc_info __initconst = {
+ .areas = r8a774b1_areas,
+ .num_areas = ARRAY_SIZE(r8a774b1_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index 11050e1..c1c216f 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -6,7 +6,7 @@
* Based on Renesas R-Car E3 System Controller
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -50,4 +50,6 @@
.init = r8a774c0_sysc_init,
.areas = r8a774c0_areas,
.num_areas = ARRAY_SIZE(r8a774c0_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a774e1-sysc.c b/drivers/soc/renesas/r8a774e1-sysc.c
new file mode 100644
index 0000000..18449f7
--- /dev/null
+++ b/drivers/soc/renesas/r8a774e1-sysc.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2H System Controller
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car H3 System Controller
+ * Copyright (C) 2016-2017 Glider bvba
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a774e1-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a774e1_areas[] __initconst = {
+ { "always-on", 0, 0, R8A774E1_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A774E1_PD_CA57_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A774E1_PD_CA57_CPU0, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A774E1_PD_CA57_CPU1, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu2", 0x80, 2, R8A774E1_PD_CA57_CPU2, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca57-cpu3", 0x80, 3, R8A774E1_PD_CA57_CPU3, R8A774E1_PD_CA57_SCU, PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A774E1_PD_CA53_SCU, R8A774E1_PD_ALWAYS_ON, PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A774E1_PD_CA53_CPU0, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A774E1_PD_CA53_CPU1, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A774E1_PD_CA53_CPU2, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A774E1_PD_CA53_CPU3, R8A774E1_PD_CA53_SCU, PD_CPU_NOCR },
+ { "a3vp", 0x340, 0, R8A774E1_PD_A3VP, R8A774E1_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A774E1_PD_A3VC, R8A774E1_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A774E1_PD_A2VC1, R8A774E1_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A774E1_PD_3DG_A, R8A774E1_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A774E1_PD_3DG_B, R8A774E1_PD_3DG_A },
+ { "3dg-c", 0x100, 2, R8A774E1_PD_3DG_C, R8A774E1_PD_3DG_B },
+ { "3dg-d", 0x100, 3, R8A774E1_PD_3DG_D, R8A774E1_PD_3DG_C },
+ { "3dg-e", 0x100, 4, R8A774E1_PD_3DG_E, R8A774E1_PD_3DG_D },
+};
+
+const struct rcar_sysc_info r8a774e1_sysc_info __initconst = {
+ .areas = r8a774e1_areas,
+ .num_areas = ARRAY_SIZE(r8a774e1_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
diff --git a/drivers/soc/renesas/r8a7779-sysc.c b/drivers/soc/renesas/r8a7779-sysc.c
index 517aa40..e24a715 100644
--- a/drivers/soc/renesas/r8a7779-sysc.c
+++ b/drivers/soc/renesas/r8a7779-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7779-sysc.h>
diff --git a/drivers/soc/renesas/r8a7790-sysc.c b/drivers/soc/renesas/r8a7790-sysc.c
index 9b5a6bb..b9afe7f 100644
--- a/drivers/soc/renesas/r8a7790-sysc.c
+++ b/drivers/soc/renesas/r8a7790-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7790-sysc.h>
diff --git a/drivers/soc/renesas/r8a7791-sysc.c b/drivers/soc/renesas/r8a7791-sysc.c
index acf545c..f00fa24 100644
--- a/drivers/soc/renesas/r8a7791-sysc.c
+++ b/drivers/soc/renesas/r8a7791-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7791-sysc.h>
diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c
index 05b7852..60aae24 100644
--- a/drivers/soc/renesas/r8a7792-sysc.c
+++ b/drivers/soc/renesas/r8a7792-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/soc/renesas/r8a7794-sysc.c b/drivers/soc/renesas/r8a7794-sysc.c
index 0d42637..72ef4e8 100644
--- a/drivers/soc/renesas/r8a7794-sysc.c
+++ b/drivers/soc/renesas/r8a7794-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7794-sysc.h>
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
index cda27a6..9107441 100644
--- a/drivers/soc/renesas/r8a7795-sysc.c
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2016-2017 Glider bvba
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -51,25 +51,46 @@
/*
- * Fixups for R-Car H3 revisions after ES1.x
+ * Fixups for R-Car H3 revisions
*/
-static const struct soc_device_attribute r8a7795es1[] __initconst = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
+#define HAS_A2VC0 BIT(0) /* Power domain A2VC0 is present */
+#define NO_EXTMASK BIT(1) /* Missing SYSCEXTMASK register */
+
+static const struct soc_device_attribute r8a7795_quirks_match[] __initconst = {
+ {
+ .soc_id = "r8a7795", .revision = "ES1.*",
+ .data = (void *)(HAS_A2VC0 | NO_EXTMASK),
+ }, {
+ .soc_id = "r8a7795", .revision = "ES2.*",
+ .data = (void *)(NO_EXTMASK),
+ },
{ /* sentinel */ }
};
static int __init r8a7795_sysc_init(void)
{
- if (!soc_device_match(r8a7795es1))
+ const struct soc_device_attribute *attr;
+ u32 quirks = 0;
+
+ attr = soc_device_match(r8a7795_quirks_match);
+ if (attr)
+ quirks = (uintptr_t)attr->data;
+
+ if (!(quirks & HAS_A2VC0))
rcar_sysc_nullify(r8a7795_areas, ARRAY_SIZE(r8a7795_areas),
R8A7795_PD_A2VC0);
+ if (quirks & NO_EXTMASK)
+ r8a7795_sysc_info.extmask_val = 0;
+
return 0;
}
-const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+struct rcar_sysc_info r8a7795_sysc_info __initdata = {
.init = r8a7795_sysc_init,
.areas = r8a7795_areas,
.num_areas = ARRAY_SIZE(r8a7795_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
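
The rework above trades a boolean soc_device_match() check for quirk flags carried in each match entry's .data pointer, so new revisions only need a new table row. A minimal sketch of the pattern, with hypothetical flag names (QUIRK_FOO/QUIRK_BAR are illustrative, not kernel symbols):

#include <linux/bits.h>
#include <linux/init.h>
#include <linux/sys_soc.h>

#define QUIRK_FOO	BIT(0)
#define QUIRK_BAR	BIT(1)

static const struct soc_device_attribute quirks_match[] __initconst = {
	/* flags travel in .data; uintptr_t round-trips them safely */
	{ .soc_id = "r8a7795", .revision = "ES1.*",
	  .data = (void *)(QUIRK_FOO | QUIRK_BAR), },
	{ /* sentinel */ }
};

static u32 __init decode_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(quirks_match);
	return attr ? (u32)(uintptr_t)attr->data : 0;
}

r8a7795_sysc_init() then tests individual bits instead of re-matching once per quirk.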
diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c
index 1b06f86..471bd5b 100644
--- a/drivers/soc/renesas/r8a7796-sysc.c
+++ b/drivers/soc/renesas/r8a7796-sysc.c
@@ -1,18 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Renesas R-Car M3-W System Controller
+ * Renesas R-Car M3-W/W+ System Controller
*
* Copyright (C) 2016 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corporation
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a7796-sysc.h>
#include "rcar-sysc.h"
-static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
+static struct rcar_sysc_area r8a7796_areas[] __initdata = {
{ "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
PD_SCU },
@@ -39,7 +40,28 @@
{ "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
};
-const struct rcar_sysc_info r8a7796_sysc_info __initconst = {
+
+#ifdef CONFIG_SYSC_R8A77960
+const struct rcar_sysc_info r8a77960_sysc_info __initconst = {
.areas = r8a7796_areas,
.num_areas = ARRAY_SIZE(r8a7796_areas),
};
+#endif /* CONFIG_SYSC_R8A77960 */
+
+#ifdef CONFIG_SYSC_R8A77961
+static int __init r8a77961_sysc_init(void)
+{
+ rcar_sysc_nullify(r8a7796_areas, ARRAY_SIZE(r8a7796_areas),
+ R8A7796_PD_A2VC0);
+
+ return 0;
+}
+
+const struct rcar_sysc_info r8a77961_sysc_info __initconst = {
+ .init = r8a77961_sysc_init,
+ .areas = r8a7796_areas,
+ .num_areas = ARRAY_SIZE(r8a7796_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
+};
+#endif /* CONFIG_SYSC_R8A77961 */
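
M3-W (R8A77960) and M3-W+ (R8A77961) now share the single r8a7796_areas[] table, and the M3-W+ init hook prunes A2VC0, which only the original M3-W has. rcar_sysc_nullify() is not part of this hunk; for context, the shared helper in rcar-sysc.c behaves along these lines (sketch):

void __init rcar_sysc_nullify(struct rcar_sysc_area *areas,
			      unsigned int num_areas, u8 id)
{
	unsigned int i;

	for (i = 0; i < num_areas; i++)
		if (areas[i].isr_bit == id) {
			/* NULL-named areas are skipped during genpd setup */
			areas[i].name = NULL;
			return;
		}
}

This is also why r8a7796_areas[] loses its const above: the table must stay writable for the nullify call.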
diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c
index e0533be..ff0b0d1 100644
--- a/drivers/soc/renesas/r8a77965-sysc.c
+++ b/drivers/soc/renesas/r8a77965-sysc.c
@@ -7,7 +7,7 @@
* Copyright (C) 2016 Glider bvba
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77965-sysc.h>
@@ -33,4 +33,6 @@
const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
.areas = r8a77965_areas,
.num_areas = ARRAY_SIZE(r8a77965_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
index 280c48b..7062582 100644
--- a/drivers/soc/renesas/r8a77970-sysc.c
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2017 Cogent Embedded Inc.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77970-sysc.h>
@@ -32,4 +32,6 @@
const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
.areas = r8a77970_areas,
.num_areas = ARRAY_SIZE(r8a77970_areas),
+ .extmask_offs = 0x1b0,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
index a8dbe55..39ca84a 100644
--- a/drivers/soc/renesas/r8a77980-sysc.c
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -6,7 +6,7 @@
* Copyright (C) 2018 Cogent Embedded, Inc.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77980-sysc.h>
@@ -49,4 +49,6 @@
const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
.areas = r8a77980_areas,
.num_areas = ARRAY_SIZE(r8a77980_areas),
+ .extmask_offs = 0x138,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77990-sysc.c b/drivers/soc/renesas/r8a77990-sysc.c
index 664b244..9f92737 100644
--- a/drivers/soc/renesas/r8a77990-sysc.c
+++ b/drivers/soc/renesas/r8a77990-sysc.c
@@ -5,7 +5,7 @@
* Copyright (C) 2018 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/sys_soc.h>
@@ -50,4 +50,6 @@
.init = r8a77990_sysc_init,
.areas = r8a77990_areas,
.num_areas = ARRAY_SIZE(r8a77990_areas),
+ .extmask_offs = 0x2f8,
+ .extmask_val = BIT(0),
};
diff --git a/drivers/soc/renesas/r8a77995-sysc.c b/drivers/soc/renesas/r8a77995-sysc.c
index 6243aaa..efcc67e 100644
--- a/drivers/soc/renesas/r8a77995-sysc.c
+++ b/drivers/soc/renesas/r8a77995-sysc.c
@@ -5,7 +5,6 @@
* Copyright (C) 2017 Glider bvba
*/
-#include <linux/bug.h>
#include <linux/kernel.h>
#include <dt-bindings/power/r8a77995-sysc.h>
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
new file mode 100644
index 0000000..d464ffa
--- /dev/null
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V3U System Controller
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779a0-sysc.h>
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+/*
+ * Description of a Power Area
+ */
+struct r8a779a0_sysc_area {
+ const char *name;
+ u8 pdr; /* PDRn */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+/*
+ * SoC-specific Power Area Description
+ */
+struct r8a779a0_sysc_info {
+ const struct r8a779a0_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
+ { "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
+ { "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779A0_PD_A2E0D0, R8A779A0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779A0_PD_A2E0D1, R8A779A0_PD_A3E0, PD_SCU },
+ { "a2e1d0", R8A779A0_PD_A2E1D0, R8A779A0_PD_A3E1, PD_SCU },
+ { "a2e1d1", R8A779A0_PD_A2E1D1, R8A779A0_PD_A3E1, PD_SCU },
+ { "a1e0d0c0", R8A779A0_PD_A1E0D0C0, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779A0_PD_A1E0D0C1, R8A779A0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779A0_PD_A1E0D1C0, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779A0_PD_A1E0D1C1, R8A779A0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e1d0c0", R8A779A0_PD_A1E1D0C0, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d0c1", R8A779A0_PD_A1E1D0C1, R8A779A0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d1c0", R8A779A0_PD_A1E1D1C0, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
+ { "a1e1d1c1", R8A779A0_PD_A1E1D1C1, R8A779A0_PD_A2E1D1, PD_CPU_NOCR },
+ { "3dg-a", R8A779A0_PD_3DG_A, R8A779A0_PD_ALWAYS_ON },
+ { "3dg-b", R8A779A0_PD_3DG_B, R8A779A0_PD_3DG_A },
+ { "a3vip0", R8A779A0_PD_A3VIP0, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip1", R8A779A0_PD_A3VIP1, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip3", R8A779A0_PD_A3VIP3, R8A779A0_PD_ALWAYS_ON },
+ { "a3vip2", R8A779A0_PD_A3VIP2, R8A779A0_PD_ALWAYS_ON },
+ { "a3isp01", R8A779A0_PD_A3ISP01, R8A779A0_PD_ALWAYS_ON },
+ { "a3isp23", R8A779A0_PD_A3ISP23, R8A779A0_PD_ALWAYS_ON },
+ { "a3ir", R8A779A0_PD_A3IR, R8A779A0_PD_ALWAYS_ON },
+ { "a2cn0", R8A779A0_PD_A2CN0, R8A779A0_PD_A3IR },
+ { "a2imp01", R8A779A0_PD_A2IMP01, R8A779A0_PD_A3IR },
+ { "a2dp0", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
+ { "a2cv0", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
+ { "a2cv1", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
+ { "a2cv4", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
+ { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR },
+ { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR },
+ { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR },
+ { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR },
+ { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR },
+ { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR },
+ { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR },
+ { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR },
+ { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 },
+ { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 },
+ { "a1dsp0", R8A779A0_PD_A1DSP0, R8A779A0_PD_A2CN2 },
+ { "a1cnn1", R8A779A0_PD_A1CNN1, R8A779A0_PD_A2CN1 },
+ { "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
+};
+
+static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = {
+ .areas = r8a779a0_areas,
+ .num_areas = ARRAY_SIZE(r8a779a0_areas),
+};
+
+/* SYSC Common */
+#define SYSCSR 0x000 /* SYSC Status Register */
+#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register */
+#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
+#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
+#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
+#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
+
+/* Power Domain Registers */
+#define PDRSR(n) (0x1000 + ((n) * 0x40))
+#define PDRONCR(n) (0x1004 + ((n) * 0x40))
+#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
+#define PDRESR(n) (0x100C + ((n) * 0x40))
+
+/* PWRON/PWROFF */
+#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
+
+/* PDRESR */
+#define PDRESR_ERR BIT(0)
+
+/* PDRSR */
+#define PDRSR_OFF BIT(0) /* Power-OFF state */
+#define PDRSR_ON BIT(4) /* Power-ON state */
+#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
+#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
+
+#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */
+
+#define SYSCSR_TIMEOUT 10000
+#define SYSCSR_DELAY_US 10
+
+#define PDRESR_RETRIES 1000
+#define PDRESR_DELAY_US 10
+
+#define SYSCISR_TIMEOUT 10000
+#define SYSCISR_DELAY_US 10
+
+#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
+
+static void __iomem *r8a779a0_sysc_base;
+static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
+{
+ unsigned int reg_offs;
+ u32 val;
+ int ret;
+
+ if (on)
+ reg_offs = PDRONCR(pdr);
+ else
+ reg_offs = PDROFFCR(pdr);
+
+ /* Wait until SYSC is ready to accept a power request */
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
+ (val & SYSCSR_BUSY) == SYSCSR_BUSY,
+ SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
+ if (ret < 0)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);
+
+ return 0;
+}
+
+static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
+{
+ u32 val;
+ int ret;
+
+ iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));
+
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
+ val, !(val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int r8a779a0_sysc_power(u8 pdr, bool on)
+{
+ unsigned int isr_mask;
+ unsigned int reg_idx, bit_idx;
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+ int k;
+
+ spin_lock_irqsave(&r8a779a0_sysc_lock, flags);
+
+ reg_idx = pdr / NUM_DOMAINS_EACH_REG;
+ bit_idx = pdr % NUM_DOMAINS_EACH_REG;
+
+ isr_mask = BIT(bit_idx);
+
+ /*
+ * The interrupt source needs to be enabled, but masked, to prevent the
+ * CPU from receiving it.
+ */
+ iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
+ r8a779a0_sysc_base + SYSCIER(reg_idx));
+ iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
+ r8a779a0_sysc_base + SYSCIMR(reg_idx));
+
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ /* Submit power shutoff or resume request until it is accepted */
+ for (k = 0; k < PDRESR_RETRIES; k++) {
+ ret = r8a779a0_sysc_pwr_on_off(pdr, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
+ if (!(status & PDRESR_ERR))
+ break;
+
+ udelay(PDRESR_DELAY_US);
+ }
+
+ if (k == PDRESR_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Wait until the power shutoff or resume request has completed */
+ ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
+ val, (val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear interrupt flags */
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ out:
+ spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);
+ return ret;
+}
+
+static bool r8a779a0_sysc_power_is_off(u8 pdr)
+{
+ unsigned int st;
+
+ st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));
+
+ if (st & PDRSR_OFF)
+ return true;
+
+ return false;
+}
+
+struct r8a779a0_sysc_pd {
+ struct generic_pm_domain genpd;
+ u8 pdr;
+ unsigned int flags;
+ char name[];
+};
+
+static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct r8a779a0_sysc_pd, genpd);
+}
+
+static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return r8a779a0_sysc_power(pd->pdr, false);
+}
+
+static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return r8a779a0_sysc_power(pd->pdr, true);
+}
+
+static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ int error;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+
+ genpd->power_off = r8a779a0_sysc_pd_power_off;
+ genpd->power_on = r8a779a0_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ r8a779a0_sysc_power(pd->pdr, true);
+
+finalize:
+ error = pm_genpd_init(genpd, &simple_qos_governor, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
+}
+
+static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
+ { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
+ { /* sentinel */ }
+};
+
+struct r8a779a0_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
+};
+
+static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
+
+static int __init r8a779a0_sysc_pd_init(void)
+{
+ const struct r8a779a0_sysc_info *info;
+ const struct of_device_id *match;
+ struct r8a779a0_pm_domains *domains;
+ struct device_node *np;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ r8a779a0_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ r8a779a0_sysc_onecell_data = &domains->onecell_data;
+
+ for (i = 0; i < info->num_areas; i++) {
+ const struct r8a779a0_sysc_area *area = &info->areas[i];
+ struct r8a779a0_sysc_pd *pd;
+
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
+ pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ strcpy(pd->name, area->name);
+ pd->genpd.name = pd->name;
+ pd->pdr = area->pdr;
+ pd->flags = area->flags;
+
+ error = r8a779a0_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
+
+ domains->domains[area->pdr] = &pd->genpd;
+
+ if (area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ &pd->genpd);
+ if (error) {
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ goto out_put;
+ }
+ }
+
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(r8a779a0_sysc_pd_init);
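
One detail of r8a779a0_sysc_power() worth spelling out: each V3U domain owns one bit in a bank of 32-bit interrupt registers, so the domain number splits into a register index and a bit index. A worked example (hypothetical helper, illustration only):

#include <linux/bits.h>

/* For PDR 37: 37 / 32 = 1 and 37 % 32 = 5, so the domain is acked by
 * writing BIT(5) = 0x20 to SYSCISCR(1) = 0x810 + 1 * 0x4 = 0x814. */
static void pdr_index_example(u8 pdr)
{
	unsigned int reg_idx = pdr / 32;	/* which SYSCI*R register */
	unsigned int bit_idx = pdr % 32;	/* which bit inside it */
	u32 isr_mask = BIT(bit_idx);

	(void)reg_idx;
	(void)isr_mask;
}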
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index d183c38..8a1e402 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -21,7 +21,7 @@
struct rst_config {
unsigned int modemr; /* Mode Monitoring Register Offset */
- int (*configure)(void *base); /* Platform specific configuration */
+ int (*configure)(void __iomem *base); /* Platform specific config */
};
static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -37,15 +37,22 @@
.modemr = 0x60,
};
+static const struct rst_config rcar_rst_r8a779a0 __initconst = {
+ .modemr = 0x00, /* MODEMR0, which also contains CPG-related bits */
+};
+
static const struct of_device_id rcar_rst_matches[] __initconst = {
/* RZ/G1 is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7742-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7744-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a77470-rst", .data = &rcar_rst_gen2 },
/* RZ/G2 is handled like R-Car Gen3 */
{ .compatible = "renesas,r8a774a1-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774b1-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a774c0-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a774e1-rst", .data = &rcar_rst_gen3 },
/* R-Car Gen1 */
{ .compatible = "renesas,r8a7778-reset-wdt", .data = &rcar_rst_gen1 },
{ .compatible = "renesas,r8a7779-reset-wdt", .data = &rcar_rst_gen1 },
@@ -58,11 +65,14 @@
/* R-Car Gen3 */
{ .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77961-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
+ /* R-Car V3U */
+ { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 },
{ /* sentinel */ }
};
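
The rst_config changes above (the void __iomem * fix and the new r8a779a0 entry, whose MODEMR0 sits at offset 0) feed the driver's init path. That path is not shown in this hunk; presumably it latches the boot-mode register and runs the optional per-SoC hook along these lines (orientation sketch only):

#include <linux/io.h>

static u32 rcar_saved_mode;

static int __init rcar_rst_consume_cfg(void __iomem *base,
				       const struct rst_config *cfg)
{
	/* Latch the mode pins sampled at reset (MODEMR/MODEMR0) */
	rcar_saved_mode = ioread32(base + cfg->modemr);

	/* Optional platform-specific setup, e.g. watchdog on Gen1 */
	if (cfg->configure)
		return cfg->configure(base);

	return 0;
}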
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 59b5e6b..9b235fc 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -63,6 +63,7 @@
static void __iomem *rcar_sysc_base;
static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
+static u32 rcar_sysc_extmask_offs, rcar_sysc_extmask_val;
static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
{
@@ -106,6 +107,14 @@
spin_lock_irqsave(&rcar_sysc_lock, flags);
/*
+ * Mask external power requests for CPU or 3DG domains
+ */
+ if (rcar_sysc_extmask_val) {
+ iowrite32(rcar_sysc_extmask_val,
+ rcar_sysc_base + rcar_sysc_extmask_offs);
+ }
+
+ /*
* The interrupt source needs to be enabled, but masked, to prevent the
* CPU from receiving it.
*/
@@ -148,6 +157,9 @@
iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
out:
+ if (rcar_sysc_extmask_val)
+ iowrite32(0, rcar_sysc_base + rcar_sysc_extmask_offs);
+
spin_unlock_irqrestore(&rcar_sysc_lock, flags);
pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
@@ -261,6 +273,9 @@
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A7742
+ { .compatible = "renesas,r8a7742-sysc", .data = &r8a7742_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7743
{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
/* RZ/G1N is identical to RZ/G2M w.r.t. power domains. */
@@ -275,9 +290,15 @@
#ifdef CONFIG_SYSC_R8A774A1
{ .compatible = "renesas,r8a774a1-sysc", .data = &r8a774a1_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A774B1
+ { .compatible = "renesas,r8a774b1-sysc", .data = &r8a774b1_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A774C0
{ .compatible = "renesas,r8a774c0-sysc", .data = &r8a774c0_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A774E1
+ { .compatible = "renesas,r8a774e1-sysc", .data = &r8a774e1_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
@@ -298,8 +319,11 @@
#ifdef CONFIG_SYSC_R8A7795
{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
#endif
-#ifdef CONFIG_SYSC_R8A7796
- { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
+#ifdef CONFIG_SYSC_R8A77960
+ { .compatible = "renesas,r8a7796-sysc", .data = &r8a77960_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A77961
+ { .compatible = "renesas,r8a77961-sysc", .data = &r8a77961_sysc_info },
#endif
#ifdef CONFIG_SYSC_R8A77965
{ .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
@@ -360,6 +384,10 @@
rcar_sysc_base = base;
+ /* Optional External Request Mask Register */
+ rcar_sysc_extmask_offs = info->extmask_offs;
+ rcar_sysc_extmask_val = info->extmask_val;
+
domains = kzalloc(sizeof(*domains), GFP_KERNEL);
if (!domains) {
error = -ENOMEM;
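
The SYSCEXTMASK writes added above bracket the whole power sequence: external power requests are masked before the on/off request is issued and unmasked again on every exit path. Condensed, the control flow is (a sketch of the code above, with the IRQ bookkeeping and completion polling elided):

static int rcar_sysc_power_sketch(const struct rcar_sysc_ch *ch, bool on)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rcar_sysc_lock, flags);

	if (rcar_sysc_extmask_val)	/* block external power requests */
		iowrite32(rcar_sysc_extmask_val,
			  rcar_sysc_base + rcar_sysc_extmask_offs);

	ret = rcar_sysc_pwr_on_off(ch, on);	/* ...then wait for SYSCISR */

	if (rcar_sysc_extmask_val)	/* re-allow external requests */
		iowrite32(0, rcar_sysc_base + rcar_sysc_extmask_offs);

	spin_unlock_irqrestore(&rcar_sysc_lock, flags);
	return ret;
}

On SoCs whose info leaves extmask_val at 0 (or where the H3 ES quirk clears it) the bracket collapses to a no-op.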
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 485520a..8d861c1 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Renesas R-Car System Controller
*
* Copyright (C) 2016 Glider bvba
@@ -44,20 +44,27 @@
int (*init)(void); /* Optional */
const struct rcar_sysc_area *areas;
unsigned int num_areas;
+ /* Optional External Request Mask Register */
+ u32 extmask_offs; /* SYSCEXTMASK register offset */
+ u32 extmask_val; /* SYSCEXTMASK register mask value */
};
+extern const struct rcar_sysc_info r8a7742_sysc_info;
extern const struct rcar_sysc_info r8a7743_sysc_info;
extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a77470_sysc_info;
extern const struct rcar_sysc_info r8a774a1_sysc_info;
+extern const struct rcar_sysc_info r8a774b1_sysc_info;
extern const struct rcar_sysc_info r8a774c0_sysc_info;
+extern const struct rcar_sysc_info r8a774e1_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
-extern const struct rcar_sysc_info r8a7795_sysc_info;
-extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern struct rcar_sysc_info r8a7795_sysc_info;
+extern const struct rcar_sysc_info r8a77960_sysc_info;
+extern const struct rcar_sysc_info r8a77961_sysc_info;
extern const struct rcar_sysc_info r8a77965_sysc_info;
extern const struct rcar_sysc_info r8a77970_sysc_info;
extern const struct rcar_sysc_info r8a77980_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 6651755..0f8eff4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -116,11 +116,21 @@
.id = 0x52,
};
+static const struct renesas_soc soc_rz_g2n __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x55,
+};
+
static const struct renesas_soc soc_rz_g2e __initconst __maybe_unused = {
.family = &fam_rzg2,
.id = 0x57,
};
+static const struct renesas_soc soc_rz_g2h __initconst __maybe_unused = {
+ .family = &fam_rzg2,
+ .id = 0x4f,
+};
+
static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
.family = &fam_rcar_gen1,
};
@@ -190,6 +200,11 @@
.id = 0x58,
};
+static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x59,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -227,9 +242,15 @@
#ifdef CONFIG_ARCH_R8A774A1
{ .compatible = "renesas,r8a774a1", .data = &soc_rz_g2m },
#endif
+#ifdef CONFIG_ARCH_R8A774B1
+ { .compatible = "renesas,r8a774b1", .data = &soc_rz_g2n },
+#endif
#ifdef CONFIG_ARCH_R8A774C0
{ .compatible = "renesas,r8a774c0", .data = &soc_rz_g2e },
#endif
+#ifdef CONFIG_ARCH_R8A774E1
+ { .compatible = "renesas,r8a774e1", .data = &soc_rz_g2h },
+#endif
#ifdef CONFIG_ARCH_R8A7778
{ .compatible = "renesas,r8a7778", .data = &soc_rcar_m1a },
#endif
@@ -251,12 +272,15 @@
#ifdef CONFIG_ARCH_R8A7794
{ .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
#endif
-#ifdef CONFIG_ARCH_R8A7795
+#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
-#ifdef CONFIG_ARCH_R8A7796
+#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
+#ifdef CONFIG_ARCH_R8A77961
+ { .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w },
+#endif
#ifdef CONFIG_ARCH_R8A77965
{ .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
#endif
@@ -272,6 +296,9 @@
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
#endif
+#ifdef CONFIG_ARCH_R8A779A0
+ { .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
+#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
index 7859907..25eb2c1 100644
--- a/drivers/soc/rockchip/Kconfig
+++ b/drivers/soc/rockchip/Kconfig
@@ -14,6 +14,14 @@
In a lot of cases there also need to be default settings initialized
to make some of them conform to expectations of the kernel.
+config ROCKCHIP_IODOMAIN
+ tristate "Rockchip IO domain support"
+ depends on OF
+ help
+ Say y here to enable support for IO domains on Rockchip SoCs. The
+ SoC's IO-domain settings must match the voltages supplied by the
+ regulators.
+
config ROCKCHIP_PM_DOMAINS
bool "Rockchip generic power domain"
depends on PM
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
index afca0a4..875032f 100644
--- a/drivers/soc/rockchip/Makefile
+++ b/drivers/soc/rockchip/Makefile
@@ -3,4 +3,5 @@
# Rockchip Soc drivers
#
obj-$(CONFIG_ROCKCHIP_GRF) += grf.o
+obj-$(CONFIG_ROCKCHIP_IODOMAIN) += io-domain.o
obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
new file mode 100644
index 0000000..b29e829
--- /dev/null
+++ b/drivers/soc/rockchip/io-domain.c
@@ -0,0 +1,631 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip IO Voltage Domain driver
+ *
+ * Copyright 2014 MundoReader S.L.
+ * Copyright 2014 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MAX_SUPPLIES 16
+
+/*
+ * The max voltage for 1.8V and 3.3V come from the Rockchip datasheet under
+ * "Recommended Operating Conditions" for "Digital GPIO". When the typical
+ * is 3.3V the max is 3.6V. When the typical is 1.8V the max is 1.98V.
+ *
+ * They are used like this:
+ * - If the voltage on a rail is above the "1.8" voltage (1.98V) we'll tell the
+ * SoC we're at 3.3.
+ * - If the voltage on a rail is above the "3.3" voltage (3.6V) we'll consider
+ * that to be an error.
+ */
+#define MAX_VOLTAGE_1_8 1980000
+#define MAX_VOLTAGE_3_3 3600000
+
+#define PX30_IO_VSEL 0x180
+#define PX30_IO_VSEL_VCCIO6_SRC BIT(0)
+#define PX30_IO_VSEL_VCCIO6_SUPPLY_NUM 1
+
+#define RK3288_SOC_CON2 0x24c
+#define RK3288_SOC_CON2_FLASH0 BIT(7)
+#define RK3288_SOC_FLASH_SUPPLY_NUM 2
+
+#define RK3328_SOC_CON4 0x410
+#define RK3328_SOC_CON4_VCCIO2 BIT(7)
+#define RK3328_SOC_VCCIO2_SUPPLY_NUM 1
+
+#define RK3368_SOC_CON15 0x43c
+#define RK3368_SOC_CON15_FLASH0 BIT(14)
+#define RK3368_SOC_FLASH_SUPPLY_NUM 2
+
+#define RK3399_PMUGRF_CON0 0x180
+#define RK3399_PMUGRF_CON0_VSEL BIT(8)
+#define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9
+
+struct rockchip_iodomain;
+
+/**
+ * struct rockchip_iodomain_soc_data - SoC-specific IO-domain description
+ * @grf_offset: offset of the IO-domain register within the GRF
+ * @supply_names: supply names matching the register bits (NULL = unused)
+ * @init: optional SoC-specific initialization hook
+ */
+struct rockchip_iodomain_soc_data {
+ int grf_offset;
+ const char *supply_names[MAX_SUPPLIES];
+ void (*init)(struct rockchip_iodomain *iod);
+};
+
+struct rockchip_iodomain_supply {
+ struct rockchip_iodomain *iod;
+ struct regulator *reg;
+ struct notifier_block nb;
+ int idx;
+};
+
+struct rockchip_iodomain {
+ struct device *dev;
+ struct regmap *grf;
+ const struct rockchip_iodomain_soc_data *soc_data;
+ struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
+};
+
+static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply,
+ int uV)
+{
+ struct rockchip_iodomain *iod = supply->iod;
+ u32 val;
+ int ret;
+
+ /* set value bit */
+ val = (uV > MAX_VOLTAGE_1_8) ? 0 : 1;
+ val <<= supply->idx;
+
+ /* apply hiword-mask */
+ val |= (BIT(supply->idx) << 16);
+
+ ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);
+ if (ret)
+ dev_err(iod->dev, "Couldn't write to GRF\n");
+
+ return ret;
+}
+
+static int rockchip_iodomain_notify(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct rockchip_iodomain_supply *supply =
+ container_of(nb, struct rockchip_iodomain_supply, nb);
+ int uV;
+ int ret;
+
+ /*
+ * According to Rockchip it's important to keep the SoC IO domain
+ * higher than (or equal to) the external voltage. That means we need
+ * to change it before external voltage changes happen in the case
+ * of an increase.
+ *
+ * Note that in the "pre" change we pick the max possible voltage that
+ * the regulator might end up at (the client requests a range and we
+ * don't know for certain the exact voltage). Right now we rely on the
+ * slop in MAX_VOLTAGE_1_8 and MAX_VOLTAGE_3_3 to save us if clients
+ * request something like a max of 3.6V when they really want 3.3V.
+ * We could attempt to come up with better rules if this fails.
+ */
+ if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
+ struct pre_voltage_change_data *pvc_data = data;
+
+ uV = max_t(unsigned long, pvc_data->old_uV, pvc_data->max_uV);
+ } else if (event & (REGULATOR_EVENT_VOLTAGE_CHANGE |
+ REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE)) {
+ uV = (unsigned long)data;
+ } else {
+ return NOTIFY_OK;
+ }
+
+ dev_dbg(supply->iod->dev, "Setting to %d\n", uV);
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_err(supply->iod->dev, "Voltage too high: %d\n", uV);
+
+ if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+ }
+
+ ret = rockchip_iodomain_write(supply, uV);
+ if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ return NOTIFY_BAD;
+
+ dev_dbg(supply->iod->dev, "Setting to %d done\n", uV);
+ return NOTIFY_OK;
+}
+
+static void px30_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no VCCIO6 supply we should leave things alone */
+ if (!iod->supplies[PX30_IO_VSEL_VCCIO6_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set vccio6 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = PX30_IO_VSEL_VCCIO6_SRC | (PX30_IO_VSEL_VCCIO6_SRC << 16);
+ ret = regmap_write(iod->grf, PX30_IO_VSEL, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio6 ctrl\n");
+}
+
+static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3288_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3288_SOC_CON2_FLASH0 | (RK3288_SOC_CON2_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3288_SOC_CON2, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+static void rk3328_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no vccio2 supply we should leave things alone */
+ if (!iod->supplies[RK3328_SOC_VCCIO2_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set vccio2 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3328_SOC_CON4_VCCIO2 | (RK3328_SOC_CON4_VCCIO2 << 16);
+ ret = regmap_write(iod->grf, RK3328_SOC_CON4, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update vccio2 vsel ctrl\n");
+}
+
+static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no flash supply we should leave things alone */
+ if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set flash0 iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
+ ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
+static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no pmu io supply we should leave things alone */
+ if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set pmu io iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16);
+ ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n");
+}
+
+static const struct rockchip_iodomain_soc_data soc_data_px30 = {
+ .grf_offset = 0x180,
+ .supply_names = {
+ NULL,
+ "vccio6",
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio-oscgpi",
+ },
+ .init = px30_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_px30_pmu = {
+ .grf_offset = 0x100,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmuio1",
+ "pmuio2",
+ },
+};
+
+/*
+ * On the rk3188 the io-domains are handled by a shared register, with
+ * the lower 8 bits still holding drive-strength settings.
+ */
+static const struct rockchip_iodomain_soc_data soc_data_rk3188 = {
+ .grf_offset = 0x104,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "ap0",
+ "ap1",
+ "cif",
+ "flash",
+ "vccio0",
+ "vccio1",
+ "lcdc0",
+ "lcdc1",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3228 = {
+ .grf_offset = 0x418,
+ .supply_names = {
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
+ .grf_offset = 0x380,
+ .supply_names = {
+ "lcdc", /* LCDC_VDD */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "flash1", /* FLASH1_VDD (sdio1) */
+ "wifi", /* APIO3_VDD (sdio0) */
+ "bb", /* APIO5_VDD */
+ "audio", /* APIO4_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO2_VDD */
+ },
+ .init = rk3288_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3328 = {
+ .grf_offset = 0x410,
+ .supply_names = {
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "pmuio",
+ },
+ .init = rk3328_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
+ .grf_offset = 0x900,
+ .supply_names = {
+ NULL, /* reserved */
+ "dvp", /* DVPIO_VDD */
+ "flash0", /* FLASH0_VDD (emmc) */
+ "wifi", /* APIO2_VDD (sdio0) */
+ NULL,
+ "audio", /* APIO3_VDD */
+ "sdcard", /* SDMMC0_VDD (sdmmc) */
+ "gpio30", /* APIO1_VDD */
+ "gpio1830", /* APIO4_VDD (gpujtag) */
+ },
+ .init = rk3368_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
+ .grf_offset = 0x100,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmu", /*PMU IO domain*/
+ "vop", /*LCDC IO domain*/
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399 = {
+ .grf_offset = 0xe640,
+ .supply_names = {
+ "bt656", /* APIO2_VDD */
+ "audio", /* APIO5_VDD */
+ "sdmmc", /* SDMMC0_VDD */
+ "gpio1830", /* APIO4_VDD */
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
+ .grf_offset = 0x180,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmu1830", /* PMUIO2_VDD */
+ },
+ .init = rk3399_pmu_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rv1108 = {
+ .grf_offset = 0x404,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio5",
+ "vccio6",
+ },
+
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
+ .grf_offset = 0x104,
+ .supply_names = {
+ "pmu",
+ },
+};
+
+static const struct of_device_id rockchip_iodomain_match[] = {
+ {
+ .compatible = "rockchip,px30-io-voltage-domain",
+ .data = (void *)&soc_data_px30
+ },
+ {
+ .compatible = "rockchip,px30-pmu-io-voltage-domain",
+ .data = (void *)&soc_data_px30_pmu
+ },
+ {
+ .compatible = "rockchip,rk3188-io-voltage-domain",
+ .data = &soc_data_rk3188
+ },
+ {
+ .compatible = "rockchip,rk3228-io-voltage-domain",
+ .data = &soc_data_rk3228
+ },
+ {
+ .compatible = "rockchip,rk3288-io-voltage-domain",
+ .data = &soc_data_rk3288
+ },
+ {
+ .compatible = "rockchip,rk3328-io-voltage-domain",
+ .data = &soc_data_rk3328
+ },
+ {
+ .compatible = "rockchip,rk3368-io-voltage-domain",
+ .data = &soc_data_rk3368
+ },
+ {
+ .compatible = "rockchip,rk3368-pmu-io-voltage-domain",
+ .data = &soc_data_rk3368_pmu
+ },
+ {
+ .compatible = "rockchip,rk3399-io-voltage-domain",
+ .data = &soc_data_rk3399
+ },
+ {
+ .compatible = "rockchip,rk3399-pmu-io-voltage-domain",
+ .data = &soc_data_rk3399_pmu
+ },
+ {
+ .compatible = "rockchip,rv1108-io-voltage-domain",
+ .data = &soc_data_rv1108
+ },
+ {
+ .compatible = "rockchip,rv1108-pmu-io-voltage-domain",
+ .data = &soc_data_rv1108_pmu
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
+
+static int rockchip_iodomain_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct rockchip_iodomain *iod;
+ struct device *parent;
+ int i, ret = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ iod = devm_kzalloc(&pdev->dev, sizeof(*iod), GFP_KERNEL);
+ if (!iod)
+ return -ENOMEM;
+
+ iod->dev = &pdev->dev;
+ platform_set_drvdata(pdev, iod);
+
+ match = of_match_node(rockchip_iodomain_match, np);
+ iod->soc_data = match->data;
+
+ parent = pdev->dev.parent;
+ if (parent && parent->of_node) {
+ iod->grf = syscon_node_to_regmap(parent->of_node);
+ } else {
+ dev_dbg(&pdev->dev, "falling back to old binding\n");
+ iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ }
+
+ if (IS_ERR(iod->grf)) {
+ dev_err(&pdev->dev, "couldn't find grf regmap\n");
+ return PTR_ERR(iod->grf);
+ }
+
+ for (i = 0; i < MAX_SUPPLIES; i++) {
+ const char *supply_name = iod->soc_data->supply_names[i];
+ struct rockchip_iodomain_supply *supply = &iod->supplies[i];
+ struct regulator *reg;
+ int uV;
+
+ if (!supply_name)
+ continue;
+
+ reg = devm_regulator_get_optional(iod->dev, supply_name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ /* If a supply wasn't specified, that's OK */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret != -EPROBE_DEFER)
+ dev_err(iod->dev, "couldn't get regulator %s\n",
+ supply_name);
+ goto unreg_notify;
+ }
+
+ /* set initial correct value */
+ uV = regulator_get_voltage(reg);
+
+ /* must be a regulator we can get the voltage of */
+ if (uV < 0) {
+ dev_err(iod->dev, "Can't determine voltage: %s\n",
+ supply_name);
+ ret = uV;
+ goto unreg_notify;
+ }
+
+ if (uV > MAX_VOLTAGE_3_3) {
+ dev_crit(iod->dev,
+ "%d uV is too high. May damage SoC!\n",
+ uV);
+ ret = -EINVAL;
+ goto unreg_notify;
+ }
+
+ /* setup our supply */
+ supply->idx = i;
+ supply->iod = iod;
+ supply->reg = reg;
+ supply->nb.notifier_call = rockchip_iodomain_notify;
+
+ ret = rockchip_iodomain_write(supply, uV);
+ if (ret) {
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+
+ /* register regulator notifier */
+ ret = regulator_register_notifier(reg, &supply->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "regulator notifier request failed\n");
+ supply->reg = NULL;
+ goto unreg_notify;
+ }
+ }
+
+ if (iod->soc_data->init)
+ iod->soc_data->init(iod);
+
+ return 0;
+
+unreg_notify:
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return ret;
+}
+
+static int rockchip_iodomain_remove(struct platform_device *pdev)
+{
+ struct rockchip_iodomain *iod = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = MAX_SUPPLIES - 1; i >= 0; i--) {
+ struct rockchip_iodomain_supply *io_supply = &iod->supplies[i];
+
+ if (io_supply->reg)
+ regulator_unregister_notifier(io_supply->reg,
+ &io_supply->nb);
+ }
+
+ return 0;
+}
+
+static struct platform_driver rockchip_iodomain_driver = {
+ .probe = rockchip_iodomain_probe,
+ .remove = rockchip_iodomain_remove,
+ .driver = {
+ .name = "rockchip-iodomain",
+ .of_match_table = rockchip_iodomain_match,
+ },
+};
+
+module_platform_driver(rockchip_iodomain_driver);
+
+MODULE_DESCRIPTION("Rockchip IO-domain driver");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_AUTHOR("Doug Anderson <dianders@chromium.org>");
+MODULE_LICENSE("GPL v2");
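
Rockchip GRF registers follow a "hiword mask" convention: the upper 16 bits of a write select which of the lower 16 bits actually change, so rockchip_iodomain_write() needs no read-modify-write and no lock around the regmap write. A worked example mirroring that computation (hypothetical helper):

#include <linux/bits.h>

static u32 iodomain_grf_val(int idx, int uV)
{
	u32 val = (uV > 1980000) ? 0 : 1;	/* 0 = 3.3 V, 1 = 1.8 V */

	val <<= idx;			/* value bit for this supply */
	val |= BIT(idx) << 16;		/* write-enable just that bit */

	return val;	/* idx = 2 at 1.8 V -> 0x00040004 */
}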
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 33ad0de..fc7f48a 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -1,12 +1,22 @@
# SPDX-License-Identifier: GPL-2.0
#
-# SAMSUNG SoC drivers
+# Samsung SoC drivers
#
menuconfig SOC_SAMSUNG
bool "Samsung SoC driver support" if COMPILE_TEST
if SOC_SAMSUNG
+config EXYNOS_ASV
+ bool "Exynos Adaptive Supply Voltage support" if COMPILE_TEST
+ depends on (ARCH_EXYNOS && EXYNOS_CHIPID) || COMPILE_TEST
+ select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_ASV_ARM
+ bool "Exynos ASV ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_ASV
+
config EXYNOS_CHIPID
bool "Exynos Chipid controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -25,6 +35,56 @@
config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
- depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+ depends on (ARCH_EXYNOS && PM_GENERIC_DOMAINS) || COMPILE_TEST
+config SAMSUNG_PM_DEBUG
+ bool "Samsung PM Suspend debug"
+ depends on PM && DEBUG_KERNEL
+ depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
+ depends on DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
+ depends on DEBUG_LL && MMU
+ help
+ Say Y here if you want verbose debugging from the PM Suspend and
+ Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+ for more information.
+
+config S3C_PM_DEBUG_LED_SMDK
+ bool "SMDK LED suspend/resume debugging"
+ depends on PM && (MACH_SMDK6410)
+ help
+ Say Y here to enable the use of the SMDK LEDs on the baseboard
+ for debugging of the state of the suspend and resume process.
+
+ Note, this currently only works for S3C64XX based SMDK boards.
+
+config SAMSUNG_PM_CHECK
+ bool "S3C2410 PM Suspend Memory CRC"
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
+ select CRC32
+ help
+ Enable the PM code's memory area checksum over sleep. This option
+ will generate CRCs of all blocks of memory, and store them before
+ going to sleep. The blocks are then checked on resume for any
+ errors.
+
+ Note, this can take several seconds depending on memory size
+ and CPU speed.
+
+ See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+
+config SAMSUNG_PM_CHECK_CHUNKSIZE
+ int "S3C2410 PM Suspend CRC Chunksize (KiB)"
+ depends on PM && SAMSUNG_PM_CHECK
+ default 64
+ help
+ Set the chunksize in Kilobytes of the CRC for checking memory
+ corruption over suspend and resume. A smaller value will mean that
+ the CRC data block will take more memory, but will identify any
+ faults with better precision.
+
+ See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
+
+config EXYNOS_REGULATOR_COUPLER
+ bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
+ depends on ARCH_EXYNOS || COMPILE_TEST
endif
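
SAMSUNG_PM_CHECK trades memory for precision: 64 KiB chunks over a 128 MiB region need 2048 CRC slots (8 KiB of u32s), while 4 KiB chunks need sixteen times that but localize corruption to a page-sized window. An illustrative sketch of the idea (hypothetical helper, not the s3c-pm-check.c implementation):

#include <linux/crc32.h>
#include <linux/kernel.h>

/* CRC a region in fixed-size chunks before sleep; on resume, recompute
 * and compare slot by slot to localize any corruption. */
static void checksum_region(const void *start, size_t len, size_t chunk,
			    u32 *crcs)
{
	size_t i, n = DIV_ROUND_UP(len, chunk);

	for (i = 0; i < n; i++) {
		size_t off = i * chunk;

		crcs[i] = crc32_le(~0, start + off, min(chunk, len - off));
	}
}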
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 3b6a879..59e8e94 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,8 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS_ASV) += exynos-asv.o
+obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
+
obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
+obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
+
+obj-$(CONFIG_SAMSUNG_PM_CHECK) += s3c-pm-check.o
+obj-$(CONFIG_SAMSUNG_PM_DEBUG) += s3c-pm-debug.o
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
new file mode 100644
index 0000000..5daeadc
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define MHZ 1000000U
+
+static int exynos_asv_update_cpu_opps(struct exynos_asv *asv,
+ struct device *cpu)
+{
+ struct exynos_asv_subsys *subsys = NULL;
+ struct dev_pm_opp *opp;
+ unsigned int opp_freq;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++) {
+ if (of_device_is_compatible(cpu->of_node,
+ asv->subsys[i].cpu_dt_compat)) {
+ subsys = &asv->subsys[i];
+ break;
+ }
+ }
+ if (!subsys)
+ return -EINVAL;
+
+ for (i = 0; i < subsys->table.num_rows; i++) {
+ unsigned int new_volt, volt;
+ int ret;
+
+ opp_freq = exynos_asv_opp_get_frequency(subsys, i);
+
+ opp = dev_pm_opp_find_freq_exact(cpu, opp_freq * MHZ, true);
+ if (IS_ERR(opp)) {
+ dev_info(asv->dev, "cpu%d opp%d, freq: %u missing\n",
+ cpu->id, i, opp_freq);
+
+ continue;
+ }
+
+ volt = dev_pm_opp_get_voltage(opp);
+ new_volt = asv->opp_get_voltage(subsys, i, volt);
+ dev_pm_opp_put(opp);
+
+ if (new_volt == volt)
+ continue;
+
+ ret = dev_pm_opp_adjust_voltage(cpu, opp_freq * MHZ,
+ new_volt, new_volt, new_volt);
+ if (ret < 0)
+ dev_err(asv->dev,
+ "Failed to adjust OPP %u Hz/%u uV for cpu%d\n",
+ opp_freq, new_volt, cpu->id);
+ else
+ dev_dbg(asv->dev,
+ "Adjusted OPP %u Hz/%u -> %u uV, cpu%d\n",
+ opp_freq, volt, new_volt, cpu->id);
+ }
+
+ return 0;
+}
+
+static int exynos_asv_update_opps(struct exynos_asv *asv)
+{
+ struct opp_table *last_opp_table = NULL;
+ struct device *cpu;
+ int ret, cpuid;
+
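+ /*
+ * Each cluster shares a single OPP table, so adjust it only once;
+ * this relies on CPUs that share a table being enumerated
+ * consecutively by for_each_possible_cpu().
+ */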
+ for_each_possible_cpu(cpuid) {
+ struct opp_table *opp_table;
+
+ cpu = get_cpu_device(cpuid);
+ if (!cpu)
+ continue;
+
+ opp_table = dev_pm_opp_get_opp_table(cpu);
+ if (IS_ERR(opp_table))
+ continue;
+
+ if (!last_opp_table || opp_table != last_opp_table) {
+ last_opp_table = opp_table;
+
+ ret = exynos_asv_update_cpu_opps(asv, cpu);
+ if (ret < 0)
+ dev_err(asv->dev, "Couldn't udate OPPs for cpu%d\n",
+ cpuid);
+ }
+
+ dev_pm_opp_put_opp_table(opp_table);
+ }
+
+ return 0;
+}
+
+static int exynos_asv_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct exynos_asv *asv);
+ struct exynos_asv *asv;
+ struct device *cpu_dev;
+ u32 product_id = 0;
+ int ret, i;
+
+ asv = devm_kzalloc(&pdev->dev, sizeof(*asv), GFP_KERNEL);
+ if (!asv)
+ return -ENOMEM;
+
+ asv->chipid_regmap = device_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(asv->chipid_regmap)) {
+ dev_err(&pdev->dev, "Could not find syscon regmap\n");
+ return PTR_ERR(asv->chipid_regmap);
+ }
+
+ ret = regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PRO_ID,
+ &product_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot read revision from ChipID: %d\n",
+ ret);
+ return -ENODEV;
+ }
+
+ switch (product_id & EXYNOS_MASK) {
+ case 0xE5422000:
+ probe_func = exynos5422_asv_init;
+ break;
+ default:
+ return -ENODEV;
+ }
+
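+ /* The cpufreq driver may not have registered its OPPs yet; defer. */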
+ cpu_dev = get_cpu_device(0);
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret < 0)
+ return -EPROBE_DEFER;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "samsung,asv-bin",
+ &asv->of_bin);
+ if (ret < 0)
+ asv->of_bin = -EINVAL;
+
+ asv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, asv);
+
+ for (i = 0; i < ARRAY_SIZE(asv->subsys); i++)
+ asv->subsys[i].asv = asv;
+
+ ret = probe_func(asv);
+ if (ret < 0)
+ return ret;
+
+ return exynos_asv_update_opps(asv);
+}
+
+static const struct of_device_id exynos_asv_of_device_ids[] = {
+ { .compatible = "samsung,exynos4210-chipid" },
+ {}
+};
+
+static struct platform_driver exynos_asv_driver = {
+ .driver = {
+ .name = "exynos-asv",
+ .of_match_table = exynos_asv_of_device_ids,
+ },
+ .probe = exynos_asv_probe,
+};
+module_platform_driver(exynos_asv_driver);
diff --git a/drivers/soc/samsung/exynos-asv.h b/drivers/soc/samsung/exynos-asv.h
new file mode 100644
index 0000000..3fd1f2a
--- /dev/null
+++ b/drivers/soc/samsung/exynos-asv.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Samsung Exynos SoC Adaptive Supply Voltage support
+ */
+#ifndef __LINUX_SOC_EXYNOS_ASV_H
+#define __LINUX_SOC_EXYNOS_ASV_H
+
+struct regmap;
+
+/* HPM, IDS values to select target group */
+struct asv_limit_entry {
+ unsigned int hpm;
+ unsigned int ids;
+};
+
+struct exynos_asv_table {
+ unsigned int num_rows;
+ unsigned int num_cols;
+ u32 *buf;
+};
+
+struct exynos_asv_subsys {
+ struct exynos_asv *asv;
+ const char *cpu_dt_compat;
+ int id;
+ struct exynos_asv_table table;
+
+ unsigned int base_volt;
+ unsigned int offset_volt_h;
+ unsigned int offset_volt_l;
+};
+
+struct exynos_asv {
+ struct device *dev;
+ struct regmap *chipid_regmap;
+ struct exynos_asv_subsys subsys[2];
+
+ int (*opp_get_voltage)(const struct exynos_asv_subsys *subs,
+ int level, unsigned int voltage);
+ unsigned int group;
+ unsigned int table;
+
+ /* True if SG fields from PKG_ID register should be used */
+ bool use_sg;
+ /* ASV bin read from DT */
+ int of_bin;
+};
+
+static inline u32 __asv_get_table_entry(const struct exynos_asv_table *table,
+ unsigned int row, unsigned int col)
+{
+ return table->buf[row * (table->num_cols) + col];
+}
+
+static inline u32 exynos_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ unsigned int level, unsigned int group)
+{
+ return __asv_get_table_entry(&subsys->table, level, group + 1);
+}
+
+static inline u32 exynos_asv_opp_get_frequency(const struct exynos_asv_subsys *subsys,
+ unsigned int level)
+{
+ return __asv_get_table_entry(&subsys->table, level, 0);
+}
+
+#endif /* __LINUX_SOC_EXYNOS_ASV_H */
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index c55a47c..8d4d050 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -3,7 +3,7 @@
* Copyright (c) 2019 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * EXYNOS - CHIP ID support
+ * Exynos - CHIP ID support
* Author: Pankaj Dubey <pankaj.dubey@samsung.com>
* Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
*/
@@ -45,17 +45,25 @@
return NULL;
}
-int __init exynos_chipid_early_init(void)
+static int __init exynos_chipid_early_init(void)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *root;
+ struct device_node *syscon;
struct regmap *regmap;
u32 product_id;
u32 revision;
int ret;
- regmap = syscon_regmap_lookup_by_compatible("samsung,exynos4210-chipid");
+ syscon = of_find_compatible_node(NULL, NULL,
+ "samsung,exynos4210-chipid");
+ if (!syscon)
+ return -ENODEV;
+
+ regmap = device_node_to_regmap(syscon);
+ of_node_put(syscon);
+
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index d34ca20..17304fa 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS - CPU PMU(Power Management Unit) support
+// Exynos - CPU PMU(Power Management Unit) support
#include <linux/of.h>
#include <linux/of_address.h>
@@ -110,10 +110,8 @@
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmu_base_addr = devm_ioremap_resource(dev, res);
+ pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
return PTR_ERR(pmu_base_addr);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 977e4da..5e851f3 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -3,7 +3,7 @@
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Header for EXYNOS PMU Driver support
+ * Header for Exynos PMU Driver support
*/
#ifndef __EXYNOS_PMU_H
diff --git a/drivers/soc/samsung/exynos-regulator-coupler.c b/drivers/soc/samsung/exynos-regulator-coupler.c
new file mode 100644
index 0000000..61a156b
--- /dev/null
+++ b/drivers/soc/samsung/exynos-regulator-coupler.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Simplified generic voltage coupler from regulator core.c
+ * The main difference is that it keeps current regulator voltage
+ * if consumers didn't apply their constraints yet.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
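+/*
+ * Return contract (as used by the caller below): a negative errno on
+ * failure, 1 when the computed request already equals the target voltage
+ * (the regulator can be marked done once the request is applied), and 0
+ * when another balancing pass will be needed. *min_uV and *max_uV receive
+ * the voltage request to apply.
+ */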
+static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
+ int *current_uV,
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
+ struct regulation_constraints *constraints = rdev->constraints;
+ int desired_min_uV = 0, desired_max_uV = INT_MAX;
+ int max_current_uV = 0, min_current_uV = INT_MAX;
+ int highest_min_uV = 0, target_uV, possible_uV;
+ int i, ret, max_spread, n_coupled = c_desc->n_coupled;
+ bool done;
+
+ *current_uV = -1;
+
+ /* Find highest min desired voltage */
+ for (i = 0; i < n_coupled; i++) {
+ int tmp_min = 0;
+ int tmp_max = INT_MAX;
+
+ lockdep_assert_held_once(&c_rdevs[i]->mutex.base);
+
+ ret = regulator_check_consumers(c_rdevs[i],
+ &tmp_min,
+ &tmp_max, state);
+ if (ret < 0)
+ return ret;
+
+ if (tmp_min == 0) {
+ ret = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (ret < 0)
+ return ret;
+ tmp_min = ret;
+ }
+
+ /* apply constraints */
+ ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max);
+ if (ret < 0)
+ return ret;
+
+ highest_min_uV = max(highest_min_uV, tmp_min);
+
+ if (i == 0) {
+ desired_min_uV = tmp_min;
+ desired_max_uV = tmp_max;
+ }
+ }
+
+ max_spread = constraints->max_spread[0];
+
+ /*
+ * Let target_uV be equal to the desired one if possible.
+ * If not, set it to the minimum voltage allowed by the other
+ * coupled regulators.
+ */
+ target_uV = max(desired_min_uV, highest_min_uV - max_spread);
+
+ /*
+ * Find min and max voltages, which currently aren't violating
+ * max_spread.
+ */
+ for (i = 1; i < n_coupled; i++) {
+ int tmp_act;
+
+ tmp_act = regulator_get_voltage_rdev(c_rdevs[i]);
+ if (tmp_act < 0)
+ return tmp_act;
+
+ min_current_uV = min(tmp_act, min_current_uV);
+ max_current_uV = max(tmp_act, max_current_uV);
+ }
+
+ /*
+ * Correct the target voltage so that it doesn't currently
+ * violate max_spread.
+ */
+ possible_uV = max(target_uV, max_current_uV - max_spread);
+ possible_uV = min(possible_uV, min_current_uV + max_spread);
+
+ if (possible_uV > desired_max_uV)
+ return -EINVAL;
+
+ done = (possible_uV == target_uV);
+ desired_min_uV = possible_uV;
+
+ /* Set current_uV if it wasn't set earlier and is needed */
+ if (*current_uV == -1) {
+ ret = regulator_get_voltage_rdev(rdev);
+ if (ret < 0)
+ return ret;
+ *current_uV = ret;
+ }
+
+ *min_uV = desired_min_uV;
+ *max_uV = desired_max_uV;
+
+ return done;
+}
+
+static int exynos_coupler_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator_dev **c_rdevs;
+ struct regulator_dev *best_rdev;
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
+ unsigned int delta, best_delta;
+ unsigned long c_rdev_done = 0;
+ bool best_c_rdev_done;
+
+ c_rdevs = c_desc->coupled_rdevs;
+ n_coupled = c_desc->n_coupled;
+
+ /*
+ * Find the best possible voltage change on each loop. Leave the loop
+ * if there isn't any possible change.
+ */
+ do {
+ best_c_rdev_done = false;
+ best_delta = 0;
+ best_min_uV = 0;
+ best_max_uV = 0;
+ best_c_rdev = 0;
+ best_rdev = NULL;
+
+ /*
+ * Find highest difference between optimal voltage
+ * and current voltage.
+ */
+ for (i = 0; i < n_coupled; i++) {
+ /*
+ * optimal_uV is the best voltage that can be set for
+ * i-th regulator at the moment without violating
+ * max_spread constraint in order to balance
+ * the coupled voltages.
+ */
+ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
+
+ if (test_bit(i, &c_rdev_done))
+ continue;
+
+ ret = regulator_get_optimal_voltage(c_rdevs[i],
+ &current_uV,
+ &optimal_uV,
+ &optimal_max_uV,
+ state);
+ if (ret < 0)
+ goto out;
+
+ delta = abs(optimal_uV - current_uV);
+
+ if (delta && best_delta <= delta) {
+ best_c_rdev_done = ret;
+ best_delta = delta;
+ best_rdev = c_rdevs[i];
+ best_min_uV = optimal_uV;
+ best_max_uV = optimal_max_uV;
+ best_c_rdev = i;
+ }
+ }
+
+ /* Nothing to change, return successfully */
+ if (!best_rdev) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = regulator_set_voltage_rdev(best_rdev, best_min_uV,
+ best_max_uV, state);
+
+ if (ret < 0)
+ goto out;
+
+ if (best_c_rdev_done)
+ set_bit(best_c_rdev, &c_rdev_done);
+
+ } while (n_coupled > 1);
+
+out:
+ return ret;
+}
+
+static int exynos_coupler_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ return 0;
+}
+
+static struct regulator_coupler exynos_coupler = {
+ .attach_regulator = exynos_coupler_attach,
+ .balance_voltage = exynos_coupler_balance_voltage,
+};
+
+static int __init exynos_coupler_init(void)
+{
+ if (!of_machine_is_compatible("samsung,exynos5800"))
+ return 0;
+
+ return regulator_coupler_register(&exynos_coupler);
+}
+arch_initcall(exynos_coupler_init);
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 275d348..30f230e 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS3250 - CPU PMU (Power Management Unit) support
+// Exynos3250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index a7cdbf1..cb35103 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS4 - CPU PMU(Power Management Unit) support
+// Exynos4 - CPU PMU(Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 19b38e0..7a2d50b 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS5250 - CPU PMU (Power Management Unit) support
+// Exynos5250 - CPU PMU (Power Management Unit) support
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index b236d3b..6fedcd7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -3,7 +3,7 @@
// Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
// http://www.samsung.com/
//
-// EXYNOS5420 - CPU PMU (Power Management Unit) support
+// Exynos5420 - CPU PMU (Power Management Unit) support
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
new file mode 100644
index 0000000..01bb305
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#include <linux/bitrev.h>
+#include <linux/errno.h>
+#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-chipid.h>
+#include <linux/slab.h>
+
+#include "exynos-asv.h"
+#include "exynos5422-asv.h"
+
+#define ASV_GROUPS_NUM 14
+#define ASV_ARM_DVFS_NUM 20
+#define ASV_ARM_BIN2_DVFS_NUM 17
+#define ASV_KFC_DVFS_NUM 14
+#define ASV_KFC_BIN2_DVFS_NUM 12
+
+/*
+ * This array is a set of 4 ASV data tables, first column of each ASV table
+ * contains frequency value in MHz and subsequent columns contain the CPU
+ * cluster's supply voltage values in uV.
+ * In order to create a set of OPPs for specific SoC revision one of the voltage
+ * columns (1...14) from one of the tables (0...3) is selected during
+ * initialization. There are separate ASV tables for the big (ARM) and little
+ * (KFC) CPU cluster. Only OPPs which are already defined in devicetree
+ * will be updated.
+ */
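+/*
+ * Lookup sketch: for table T, DVFS level L and ASV group G, the helpers in
+ * exynos-asv.h read the frequency from asv_arm_table[T][L][0] and the
+ * voltage from asv_arm_table[T][L][G + 1].
+ */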
+
+static const u32 asv_arm_table[][ASV_ARM_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* ARM 0, 1 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1800, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1700, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1600, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1500, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1400, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 2 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1312500, 1300000, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1250000, 1237500, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM 3 */
+ { 2100, 1362500, 1362500, 1350000, 1337500, 1325000, 1312500, 1300000,
+ 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000 },
+ { 2000, 1312500, 1312500, 1300000, 1287500, 1275000, 1262500, 1250000,
+ 1237500, 1225000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1900, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500,
+ 1175000, 1162500, 1175000, 1162500, 1150000, 1137500, 1125000 },
+ { 1800, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500,
+ 1125000, 1112500, 1125000, 1112500, 1100000, 1087500, 1075000 },
+ { 1700, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1600, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1400, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000,
+ 987500, 975000, 987500, 975000, 962500, 950000, 937500 },
+ { 1300, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 962500, 950000, 937500, 925000, 912500 },
+ { 1200, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 937500, 925000, 912500, 900000, 900000 },
+ { 1100, 1000000, 987500, 975000, 962500, 950000, 937500, 925000,
+ 912500, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 1000, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 900, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* ARM bin 2 */
+ { 1800, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500,
+ 1150000, 1137500, 1150000, 1137500, 1125000, 1112500, 1100000 },
+ { 1700, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1600, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1075000, 1062500, 1050000, 1037500, 1025000 },
+ { 1500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1037500, 1025000, 1012500, 1000000, 987500 },
+ { 1400, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 1300, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 1000000, 987500, 975000, 962500, 950000 },
+ { 1200, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 975000, 962500, 950000, 937500, 925000 },
+ { 1100, 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500,
+ 950000, 937500, 950000, 937500, 925000, 912500, 900000 },
+ { 1000, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 925000, 912500, 900000, 900000, 900000 },
+ { 900, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 800, 962500, 950000, 937500, 925000, 912500, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 700, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const u32 asv_kfc_table[][ASV_KFC_DVFS_NUM][ASV_GROUPS_NUM + 1] = {
+{
+ /* KFC 0, 1 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 2 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC 3 */
+ { 1500, 1300000, 1300000, 1300000, 1287500, 1287500, 1287500, 1275000,
+ 1262500, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500 },
+ { 1400, 1275000, 1262500, 1250000, 1237500, 1225000, 1212500, 1200000,
+ 1187500, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500 },
+ { 1300, 1225000, 1212500, 1200000, 1187500, 1175000, 1162500, 1150000,
+ 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500 },
+ { 1200, 1175000, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000,
+ 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500 },
+ { 1100, 1137500, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500,
+ 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000 },
+ { 1000, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000,
+ 1012500, 1000000, 987500, 975000, 962500, 950000, 937500 },
+ { 900, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000, 987500,
+ 975000, 962500, 950000, 937500, 925000, 912500, 900000 },
+ { 800, 1025000, 1012500, 1000000, 987500, 975000, 962500, 950000,
+ 937500, 925000, 912500, 900000, 900000, 900000, 900000 },
+ { 700, 987500, 975000, 962500, 950000, 937500, 925000, 912500,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 950000, 937500, 925000, 912500, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 912500, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}, {
+ /* KFC bin 2 */
+ { 1300, 1250000, 1237500, 1225000, 1212500, 1200000, 1187500, 1175000,
+ 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500 },
+ { 1200, 1200000, 1187500, 1175000, 1162500, 1150000, 1137500, 1125000,
+ 1112500, 1100000, 1087500, 1075000, 1062500, 1050000, 1037500 },
+ { 1100, 1162500, 1150000, 1137500, 1125000, 1112500, 1100000, 1087500,
+ 1075000, 1062500, 1050000, 1037500, 1025000, 1012500, 1000000 },
+ { 1000, 1125000, 1112500, 1100000, 1087500, 1075000, 1062500, 1050000,
+ 1037500, 1025000, 1012500, 1000000, 987500, 975000, 962500 },
+ { 900, 1087500, 1075000, 1062500, 1050000, 1037500, 1025000, 1012500,
+ 1000000, 987500, 975000, 962500, 950000, 937500, 925000 },
+ { 800, 1050000, 1037500, 1025000, 1012500, 1000000, 987500, 975000,
+ 962500, 950000, 937500, 925000, 912500, 900000, 900000 },
+ { 700, 1012500, 1000000, 987500, 975000, 962500, 950000, 937500,
+ 925000, 912500, 900000, 900000, 900000, 900000, 900000 },
+ { 600, 975000, 962500, 950000, 937500, 925000, 912500, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 500, 937500, 925000, 912500, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 400, 925000, 912500, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 300, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+ { 200, 900000, 900000, 900000, 900000, 900000, 900000, 900000,
+ 900000, 900000, 900000, 900000, 900000, 900000, 900000 },
+}
+};
+
+static const struct asv_limit_entry __asv_limits[ASV_GROUPS_NUM] = {
+ { 13, 55 },
+ { 21, 65 },
+ { 25, 69 },
+ { 30, 72 },
+ { 36, 74 },
+ { 43, 76 },
+ { 51, 78 },
+ { 65, 80 },
+ { 81, 82 },
+ { 98, 84 },
+ { 119, 87 },
+ { 135, 89 },
+ { 150, 92 },
+ { 999, 999 },
+};
+
+static int exynos5422_asv_get_group(struct exynos_asv *asv)
+{
+ unsigned int pkgid_reg, auxi_reg;
+ int hpm, ids, i;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkgid_reg);
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &auxi_reg);
+
+ if (asv->use_sg) {
+ u32 sga = (pkgid_reg >> EXYNOS5422_SG_A_OFFSET) &
+ EXYNOS5422_SG_A_MASK;
+
+ u32 sgb = (pkgid_reg >> EXYNOS5422_SG_B_OFFSET) &
+ EXYNOS5422_SG_B_MASK;
+
+ if ((pkgid_reg >> EXYNOS5422_SG_BSIGN_OFFSET) &
+ EXYNOS5422_SG_BSIGN_MASK)
+ return sga + sgb;
+ else
+ return sga - sgb;
+ }
+
+ hpm = (auxi_reg >> EXYNOS5422_TMCB_OFFSET) & EXYNOS5422_TMCB_MASK;
+ ids = (pkgid_reg >> EXYNOS5422_IDS_OFFSET) & EXYNOS5422_IDS_MASK;
+
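+ /* Pick the first group whose HPM or IDS limit covers this chip. */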
+ for (i = 0; i < ASV_GROUPS_NUM; i++) {
+ if (ids <= __asv_limits[i].ids)
+ break;
+ if (hpm <= __asv_limits[i].hpm)
+ break;
+ }
+ if (i < ASV_GROUPS_NUM)
+ return i;
+
+ return 0;
+}
+
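+/*
+ * Map a fused offset code (0..3) to microvolts; note the encoding is
+ * not monotonic (2 selects 50 mV, 3 selects 25 mV).
+ */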
+static int __asv_offset_voltage(unsigned int index)
+{
+ switch (index) {
+ case 1:
+ return 12500;
+ case 2:
+ return 50000;
+ case 3:
+ return 25000;
+ default:
+ return 0;
+ }
+}
+
+static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int reg, value;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_AUX_INFO, &reg);
+
+ /* ARM offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_ARM_UP_OFFSET) & EXYNOS5422_ARM_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_ARM_DN_OFFSET) & EXYNOS5422_ARM_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+
+ /* KFC offset voltage setup */
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+
+ subsys->base_volt = 1000000;
+
+ value = (reg >> EXYNOS5422_KFC_UP_OFFSET) & EXYNOS5422_KFC_UP_MASK;
+ subsys->offset_volt_h = __asv_offset_voltage(value);
+
+ value = (reg >> EXYNOS5422_KFC_DN_OFFSET) & EXYNOS5422_KFC_DN_MASK;
+ subsys->offset_volt_l = __asv_offset_voltage(value);
+}
+
+static int exynos5422_asv_opp_get_voltage(const struct exynos_asv_subsys *subsys,
+ int level, unsigned int volt)
+{
+ unsigned int asv_volt;
+
+ if (level >= subsys->table.num_rows)
+ return volt;
+
+ asv_volt = exynos_asv_opp_get_voltage(subsys, level,
+ subsys->asv->group);
+
+ if (volt > subsys->base_volt)
+ asv_volt += subsys->offset_volt_h;
+ else
+ asv_volt += subsys->offset_volt_l;
+
+ return asv_volt;
+}
+
+static unsigned int exynos5422_asv_parse_table(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_TABLE_OFFSET) & EXYNOS5422_TABLE_MASK;
+}
+
+static bool exynos5422_asv_parse_bin2(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_BIN2_OFFSET) & EXYNOS5422_BIN2_MASK;
+}
+
+static bool exynos5422_asv_parse_sg(unsigned int pkg_id)
+{
+ return (pkg_id >> EXYNOS5422_USESG_OFFSET) & EXYNOS5422_USESG_MASK;
+}
+
+int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ struct exynos_asv_subsys *subsys;
+ unsigned int table_index;
+ unsigned int pkg_id;
+ bool bin2;
+
+ regmap_read(asv->chipid_regmap, EXYNOS_CHIPID_REG_PKG_ID, &pkg_id);
+
+ if (asv->of_bin == 2) {
+ bin2 = true;
+ asv->use_sg = false;
+ } else {
+ asv->use_sg = exynos5422_asv_parse_sg(pkg_id);
+ bin2 = exynos5422_asv_parse_bin2(pkg_id);
+ }
+
+ asv->group = exynos5422_asv_get_group(asv);
+ asv->table = exynos5422_asv_parse_table(pkg_id);
+
+ exynos5422_asv_offset_voltage_setup(asv);
+
+ if (bin2) {
+ table_index = 3;
+ } else {
+ if (asv->table == 2 || asv->table == 3)
+ table_index = asv->table - 1;
+ else
+ table_index = 0;
+ }
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_ARM];
+ subsys->cpu_dt_compat = "arm,cortex-a15";
+ if (bin2)
+ subsys->table.num_rows = ASV_ARM_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_ARM_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_arm_table[table_index];
+
+ subsys = &asv->subsys[EXYNOS_ASV_SUBSYS_ID_KFC];
+ subsys->cpu_dt_compat = "arm,cortex-a7";
+ if (bin2)
+ subsys->table.num_rows = ASV_KFC_BIN2_DVFS_NUM;
+ else
+ subsys->table.num_rows = ASV_KFC_DVFS_NUM;
+ subsys->table.num_cols = ASV_GROUPS_NUM + 1;
+ subsys->table.buf = (u32 *)asv_kfc_table[table_index];
+
+ asv->opp_get_voltage = exynos5422_asv_opp_get_voltage;
+
+ return 0;
+}
diff --git a/drivers/soc/samsung/exynos5422-asv.h b/drivers/soc/samsung/exynos5422-asv.h
new file mode 100644
index 0000000..95a5fb1
--- /dev/null
+++ b/drivers/soc/samsung/exynos5422-asv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Exynos 5422 SoC Adaptive Supply Voltage support
+ */
+
+#ifndef __LINUX_SOC_EXYNOS5422_ASV_H
+#define __LINUX_SOC_EXYNOS5422_ASV_H
+
+#include <linux/errno.h>
+
+enum {
+ EXYNOS_ASV_SUBSYS_ID_ARM,
+ EXYNOS_ASV_SUBSYS_ID_KFC,
+ EXYNOS_ASV_SUBSYS_ID_MAX
+};
+
+struct exynos_asv;
+
+#ifdef CONFIG_EXYNOS_ASV_ARM
+int exynos5422_asv_init(struct exynos_asv *asv);
+#else
+static inline int exynos5422_asv_init(struct exynos_asv *asv)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __LINUX_SOC_EXYNOS5422_ASV_H */
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
new file mode 100644
index 0000000..ff3e099
--- /dev/null
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// originally in linux/arch/arm/plat-s3c24xx/pm.c
+//
+// Copyright (c) 2004-2008 Simtec Electronics
+// http://armlinux.simtec.co.uk
+// Ben Dooks <ben@simtec.co.uk>
+//
+// S3C Power Management - suspend/resume memory corruption check.
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include <linux/soc/samsung/s3c-pm.h>
+
+#if CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE < 1
+#error CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE must be a positive non-zero value
+#endif
+
+/* suspend checking code...
+ *
+ * this next area does a set of crc checks over all the installed
+ * memory, so the system can verify if the resume was ok.
+ *
+ * CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE defines the block-size for the CRC,
+ * increasing it will mean that the area corrupted will be less easy to spot,
+ * and reducing the size will cause the CRC save area to grow
+*/
+
+#define CHECK_CHUNKSIZE (CONFIG_SAMSUNG_PM_CHECK_CHUNKSIZE * 1024)
+
+static u32 crc_size; /* size needed for the crc block */
+static u32 *crcs; /* allocated over suspend/resume */
+
+typedef u32 *(run_fn_t)(struct resource *ptr, u32 *arg);
+
+/* s3c_pm_run_res
+ *
+ * go through the given resource list, and look for system ram
+*/
+
+static void s3c_pm_run_res(struct resource *ptr, run_fn_t fn, u32 *arg)
+{
+ while (ptr != NULL) {
+ if (ptr->child != NULL)
+ s3c_pm_run_res(ptr->child, fn, arg);
+
+ if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+ == IORESOURCE_SYSTEM_RAM) {
+ S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
+ (unsigned long)ptr->start,
+ (unsigned long)ptr->end);
+ arg = (fn)(ptr, arg);
+ }
+
+ ptr = ptr->sibling;
+ }
+}
+
+static void s3c_pm_run_sysram(run_fn_t fn, u32 *arg)
+{
+ s3c_pm_run_res(&iomem_resource, fn, arg);
+}
+
+static u32 *s3c_pm_countram(struct resource *res, u32 *val)
+{
+ u32 size = (u32)resource_size(res);
+
+ size += CHECK_CHUNKSIZE-1;
+ size /= CHECK_CHUNKSIZE;
+
+ S3C_PMDBG("Area %08lx..%08lx, %d blocks\n",
+ (unsigned long)res->start, (unsigned long)res->end, size);
+
+ *val += size * sizeof(u32);
+ return val;
+}
+
+/* s3c_pm_prepare_check
+ *
+ * prepare the necessary information for creating the CRCs. This
+ * must be done before the final save, as it requires memory
+ * allocation and thus touches bits of the kernel we do not
+ * know about.
+*/
+
+void s3c_pm_check_prepare(void)
+{
+ crc_size = 0;
+
+ s3c_pm_run_sysram(s3c_pm_countram, &crc_size);
+
+ S3C_PMDBG("s3c_pm_prepare_check: %u checks needed\n", crc_size);
+
+ crcs = kmalloc(crc_size+4, GFP_KERNEL);
+ if (crcs == NULL)
+ printk(KERN_ERR "Cannot allocated CRC save area\n");
+}
+
+static u32 *s3c_pm_makecheck(struct resource *res, u32 *val)
+{
+ unsigned long addr, left;
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ *val = crc32_le(~0, phys_to_virt(addr), left);
+ val++;
+ }
+
+ return val;
+}
+
+/* s3c_pm_check_store
+ *
+ * compute the CRC values for the memory blocks before the final
+ * sleep.
+*/
+
+void s3c_pm_check_store(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_makecheck, crcs);
+}
+
+/* in_region
+ *
+ * return TRUE if the area defined by ptr..ptr+size overlaps the
+ * region what..what+whatsz
+*/
+
+static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
+{
+ if ((what+whatsz) < ptr)
+ return 0;
+
+ if (what > (ptr+size))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * s3c_pm_runcheck() - helper to check a resource on restore.
+ * @res: The resource to check
+ * @val: Pointer to list of CRC32 values to check.
+ *
+ * Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
+ * function runs the given memory resource checking it against the stored
+ * CRC to ensure that memory is restored. The function tries to skip as
+ * many of the areas used during the suspend process as possible.
+ */
+static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
+{
+ unsigned long addr;
+ unsigned long left;
+ void *stkpage;
+ void *ptr;
+ u32 calc;
+
+ stkpage = (void *)((u32)&calc & ~PAGE_MASK);
+
+ for (addr = res->start; addr < res->end;
+ addr += CHECK_CHUNKSIZE) {
+ left = res->end - addr;
+
+ if (left > CHECK_CHUNKSIZE)
+ left = CHECK_CHUNKSIZE;
+
+ ptr = phys_to_virt(addr);
+
+ if (in_region(ptr, left, stkpage, 4096)) {
+ S3C_PMDBG("skipping %08lx, has stack in\n", addr);
+ goto skip_check;
+ }
+
+ if (in_region(ptr, left, crcs, crc_size)) {
+ S3C_PMDBG("skipping %08lx, has crc block in\n", addr);
+ goto skip_check;
+ }
+
+ /* calculate and check the checksum */
+
+ calc = crc32_le(~0, ptr, left);
+ if (calc != *val) {
+ printk(KERN_ERR "Restore CRC error at "
+ "%08lx (%08x vs %08x)\n", addr, calc, *val);
+
+ S3C_PMDBG("Restore CRC error at %08lx (%08x vs %08x)\n",
+ addr, calc, *val);
+ }
+
+ skip_check:
+ val++;
+ }
+
+ return val;
+}
+
+/**
+ * s3c_pm_check_restore() - memory check called on resume
+ *
+ * check the CRCs after the restore event and free the memory used
+ * to hold them
+*/
+void s3c_pm_check_restore(void)
+{
+ if (crcs != NULL)
+ s3c_pm_run_sysram(s3c_pm_runcheck, crcs);
+}
+
+/**
+ * s3c_pm_check_cleanup() - free memory resources
+ *
+ * Free the resources that were allocated by the suspend
+ * memory check code. We do this separately from the
+ * s3c_pm_check_restore() function as we cannot call any
+ * functions that might sleep during resume.
+ */
+void s3c_pm_check_cleanup(void)
+{
+ kfree(crcs);
+ crcs = NULL;
+}
+
diff --git a/drivers/soc/samsung/s3c-pm-debug.c b/drivers/soc/samsung/s3c-pm-debug.c
new file mode 100644
index 0000000..b5ce0e9
--- /dev/null
+++ b/drivers/soc/samsung/s3c-pm-debug.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2013 Samsung Electronics Co., Ltd.
+// Tomasz Figa <t.figa@samsung.com>
+// Copyright (C) 2008 Openmoko, Inc.
+// Copyright (C) 2004-2008 Simtec Electronics
+// Ben Dooks <ben@simtec.co.uk>
+// http://armlinux.simtec.co.uk/
+//
+// Samsung common power management (suspend to RAM) debug support
+
+#include <linux/serial_core.h>
+#include <linux/serial_s3c.h>
+#include <linux/io.h>
+
+#include <asm/mach/map.h>
+
+#include <linux/soc/samsung/s3c-pm.h>
+
+static struct pm_uart_save uart_save;
+
+extern void printascii(const char *);
+
+void s3c_pm_dbg(const char *fmt, ...)
+{
+ va_list va;
+ char buff[256];
+
+ va_start(va, fmt);
+ vsnprintf(buff, sizeof(buff), fmt, va);
+ va_end(va);
+
+ printascii(buff);
+}
+
+static inline void __iomem *s3c_pm_uart_base(void)
+{
+ unsigned long paddr;
+ unsigned long vaddr;
+
+ debug_ll_addr(&paddr, &vaddr);
+
+ return (void __iomem *)vaddr;
+}
+
+void s3c_pm_save_uarts(bool is_s3c2410)
+{
+ void __iomem *regs = s3c_pm_uart_base();
+ struct pm_uart_save *save = &uart_save;
+
+ save->ulcon = __raw_readl(regs + S3C2410_ULCON);
+ save->ucon = __raw_readl(regs + S3C2410_UCON);
+ save->ufcon = __raw_readl(regs + S3C2410_UFCON);
+ save->umcon = __raw_readl(regs + S3C2410_UMCON);
+ save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV);
+
+ if (!is_s3c2410)
+ save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT);
+
+ S3C_PMDBG("UART[%p]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n",
+ regs, save->ulcon, save->ucon, save->ufcon, save->ubrdiv);
+}
+
+void s3c_pm_restore_uarts(bool is_s3c2410)
+{
+ void __iomem *regs = s3c_pm_uart_base();
+ struct pm_uart_save *save = &uart_save;
+
+ s3c_pm_arch_update_uart(regs, save);
+
+ __raw_writel(save->ulcon, regs + S3C2410_ULCON);
+ __raw_writel(save->ucon, regs + S3C2410_UCON);
+ __raw_writel(save->ufcon, regs + S3C2410_UFCON);
+ __raw_writel(save->umcon, regs + S3C2410_UMCON);
+ __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV);
+
+ if (!is_s3c2410)
+ __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT);
+}
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
new file mode 100644
index 0000000..58cf8c4
--- /dev/null
+++ b/drivers/soc/sifive/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_SIFIVE
+
+config SIFIVE_L2
+ bool "Sifive L2 Cache controller"
+ help
+ Support for the L2 cache controller on SiFive platforms.
+
+endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
new file mode 100644
index 0000000..b5caff7
--- /dev/null
+++ b/drivers/soc/sifive/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
new file mode 100644
index 0000000..44d7e19
--- /dev/null
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/device.h>
+#include <asm/cacheinfo.h>
+#include <soc/sifive/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops l2_cache_ops;
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations l2_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+ u32 regval, val;
+
+ regval = readl(l2_base + SIFIVE_L2_CONFIG);
+ val = regval & 0xFF;
+ pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+ val = (regval & 0xFF00) >> 8;
+ pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+ val = (regval & 0xFF0000) >> 16;
+ pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+ val = (regval & 0xFF000000) >> 24;
+ pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+ regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+ pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
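+/*
+ * Corrected (CE) and uncorrected (UE) ECC events from the interrupt
+ * handler below are pushed down this chain so that a consumer (e.g. an
+ * EDAC driver) can account for them.
+ */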
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static int l2_largest_wayenabled(void)
+{
+ return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", l2_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf)
+{
+ /* We want to use private group for L2 cache only */
+ if (this_leaf->level == 2)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+ unsigned int add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+ pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DirError interrupt sig */
+ readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+ pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataError interrupt sig */
+ readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+ pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataFail interrupt sig */
+ readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init sifive_l2_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc;
+
+ np = of_find_matching_node(NULL, sifive_l2_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2_base = ioremap(res.start, resource_size(&res));
+ if (!l2_base)
+ return -ENOMEM;
+
+ for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+ if (rc) {
+ pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ l2_config_read();
+
+ l2_cache_ops.get_priv_group = l2_get_priv_group;
+ riscv_set_cacheinfo_ops(&l2_cache_ops);
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+device_initcall(sifive_l2_init);
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 1b0d50f..d4c7bd5 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -194,7 +194,7 @@
if (!data) {
ret = -EINVAL;
goto err;
- };
+ }
for (func = data->func; func->func; func++) {
if (val == func->val) {
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 25df440..976dee0 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -15,6 +15,7 @@
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select SOC_TEGRA20_VOLTAGE_COUPLER
select TEGRA_TIMER
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -28,6 +29,7 @@
select PL310_ERRATA_769419 if CACHE_L2X0
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select SOC_TEGRA30_VOLTAGE_COUPLER
select TEGRA_TIMER
help
Support for NVIDIA Tegra T30 processor family, based on the
@@ -117,6 +119,16 @@
help
Enable support for the NVIDIA Tegra194 SoC.
+config ARCH_TEGRA_234_SOC
+ bool "NVIDIA Tegra234 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ select SOC_TEGRA_PMC
+ help
+ Enable support for the NVIDIA Tegra234 SoC.
+
endif
endif
@@ -124,6 +136,7 @@
def_bool y
depends on ARCH_TEGRA
select SOC_BUS
+ select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
config SOC_TEGRA_FLOWCTRL
bool
@@ -136,3 +149,11 @@
def_bool y
depends on PM_GENERIC_DOMAINS
depends on TEGRA_BPMP
+
+config SOC_TEGRA20_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra20 SoCs"
+ depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
+
+config SOC_TEGRA30_VOLTAGE_COUPLER
+ bool "Voltage scaling support for Tegra30 SoCs"
+ depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 902759f..9c809c1 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
+obj-$(CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER) += regulators-tegra20.o
+obj-$(CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER) += regulators-tegra30.o
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
index b6bdeef..5db919d 100644
--- a/drivers/soc/tegra/flowctrl.c
+++ b/drivers/soc/tegra/flowctrl.c
@@ -91,8 +91,23 @@
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
/* clear wfi bitmap */
reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
- /* pwr gating on wfi */
- reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+
+ if (tegra_get_chip_id() == TEGRA30) {
+ /*
+ * wfi doesn't work well on Tegra30 because the
+ * CPU hangs under some odd circumstances after
+ * power-gating (like memory running off PLLP),
+ * hence use wfe, which works fine. Note that the
+ * Tegra30 TRM clearly states that wfi should be
+ * used for "Cluster Switching", while wfe is for
+ * power-gating, just like on Tegra20.
+ */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ } else {
+ /* pwr gating on wfi */
+ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+ }
break;
}
reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
@@ -204,7 +219,7 @@
return 0;
}
- tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ tegra_flowctrl_base = ioremap(res.start, resource_size(&res));
if (!tegra_flowctrl_base)
return -ENXIO;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 3eb44e6..4388a4a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -8,6 +8,8 @@
#include <linux/kobject.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -31,50 +33,6 @@
[TEGRA_REVISION_A04] = "A04",
};
-static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
-{
- u32 val;
-
- val = fuse->read(fuse, round_down(offset, 4));
- val >>= (offset % 4) * 8;
- val &= 0xff;
-
- return val;
-}
-
-static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t pos, size_t size)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct tegra_fuse *fuse = dev_get_drvdata(dev);
- int i;
-
- if (pos < 0 || pos >= attr->size)
- return 0;
-
- if (size > attr->size - pos)
- size = attr->size - pos;
-
- for (i = 0; i < size; i++)
- buf[i] = fuse_readb(fuse, pos + i);
-
- return i;
-}
-
-static struct bin_attribute fuse_bin_attr = {
- .attr = { .name = "fuse", .mode = S_IRUGO, },
- .read = fuse_read,
-};
-
-static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
- const struct tegra_fuse_info *info)
-{
- fuse_bin_attr.size = size;
-
- return device_create_bin_file(dev, &fuse_bin_attr);
-}
-
static const struct of_device_id car_match[] __initconst = {
{ .compatible = "nvidia,tegra20-car", },
{ .compatible = "nvidia,tegra30-car", },
@@ -91,6 +49,12 @@
};
static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+ { .compatible = "nvidia,tegra234-efuse", .data = &tegra234_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+ { .compatible = "nvidia,tegra194-efuse", .data = &tegra194_fuse_soc },
+#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
{ .compatible = "nvidia,tegra186-efuse", .data = &tegra186_fuse_soc },
#endif
@@ -115,9 +79,111 @@
{ /* sentinel */ }
};
+static int tegra_fuse_read(void *priv, unsigned int offset, void *value,
+ size_t bytes)
+{
+ unsigned int count = bytes / 4, i;
+ struct tegra_fuse *fuse = priv;
+ u32 *buffer = value;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = fuse->read(fuse, offset + i * 4);
+
+ return 0;
+}
+
+static const struct nvmem_cell_info tegra_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "sata-calibration",
+ .offset = 0x124,
+ .bytes = 1,
+ .bit_offset = 0,
+ .nbits = 2,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x180,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-realignment",
+ .offset = 0x1fc,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "gpu-calibration",
+ .offset = 0x204,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration-ext",
+ .offset = 0x250,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
+ struct nvmem_config nvmem;
struct resource *res;
int err;
@@ -146,20 +212,42 @@
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0) {
- fuse->base = base;
- return err;
- }
+ if (err < 0)
+ goto restore;
}
- if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
- fuse->soc->info))
- return -ENODEV;
+ memset(&nvmem, 0, sizeof(nvmem));
+ nvmem.dev = &pdev->dev;
+ nvmem.name = "fuse";
+ nvmem.id = -1;
+ nvmem.owner = THIS_MODULE;
+ nvmem.cells = tegra_fuse_cells;
+ nvmem.ncells = ARRAY_SIZE(tegra_fuse_cells);
+ nvmem.type = NVMEM_TYPE_OTP;
+ nvmem.read_only = true;
+ nvmem.root_only = true;
+ nvmem.reg_read = tegra_fuse_read;
+ nvmem.size = fuse->soc->info->size;
+ nvmem.word_size = 4;
+ nvmem.stride = 4;
+ nvmem.priv = fuse;
+
+ fuse->nvmem = devm_nvmem_register(&pdev->dev, &nvmem);
+ if (IS_ERR(fuse->nvmem)) {
+ err = PTR_ERR(fuse->nvmem);
+ dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
+ err);
+ goto restore;
+ }
/* release the early I/O memory mapping */
iounmap(base);
return 0;
+
+restore:
+ fuse->base = base;
+ return err;
}
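With the sysfs bin file replaced by an nvmem provider, consumers go through the standard nvmem cell API. A minimal sketch, assuming a device whose cell lookup (added further down in this patch) carries con_id "calibration":

#include <linux/device.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u32 *value;

	cell = devm_nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* nvmem_cell_read() returns a kmalloc'd buffer the caller frees */
	value = nvmem_cell_read(cell, &len);
	if (IS_ERR(value))
		return PTR_ERR(value);

	dev_info(dev, "fuse calibration: %#x (%zu bytes)\n", *value, len);
	kfree(value);

	return 0;
}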
static struct platform_driver tegra_fuse_driver = {
@@ -172,7 +260,7 @@
};
builtin_platform_driver(tegra_fuse_driver);
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
{
unsigned int offset = fuse->soc->info->spare + spare * 4;
@@ -186,9 +274,12 @@
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse->read)
+ if (!fuse->read || !fuse->clk)
return -EPROBE_DEFER;
+ if (IS_ERR(fuse->clk))
+ return PTR_ERR(fuse->clk);
+
*value = fuse->read(fuse, offset);
return 0;
@@ -212,6 +303,60 @@
writel(reg, base + 0x14);
}
+static ssize_t major_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_major_rev());
+}
+
+static DEVICE_ATTR_RO(major);
+
+static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_minor_rev());
+}
+
+static DEVICE_ATTR_RO(minor);
+
+static struct attribute *tegra_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ NULL,
+};
+
+const struct attribute_group tegra_soc_attr_group = {
+ .attrs = tegra_soc_attr,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * Displays the value of the 'pre_si_platform' field of the HIDREV
+ * register for Tegra194 and later devices. A value of 0 indicates
+ * that the platform type is silicon; any non-zero value indicates
+ * the type of simulation platform in use.
+ */
+ return sprintf(buf, "%d\n", tegra_get_platform());
+}
+
+static DEVICE_ATTR_RO(platform);
+
+static struct attribute *tegra194_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ &dev_attr_platform.attr,
+ NULL,
+};
+
+const struct attribute_group tegra194_soc_attr_group = {
+ .attrs = tegra194_soc_attr,
+};
+#endif
+
struct device * __init tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
@@ -222,8 +367,10 @@
return NULL;
attr->family = kasprintf(GFP_KERNEL, "Tegra");
- attr->revision = kasprintf(GFP_KERNEL, "%d", tegra_sku_info.revision);
+ attr->revision = kasprintf(GFP_KERNEL, "%s",
+ tegra_revision_name[tegra_sku_info.revision]);
attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id());
+ attr->custom_attr_group = fuse->soc->soc_attr_group;
dev = soc_device_register(attr);
if (IS_ERR(dev)) {
@@ -323,7 +470,7 @@
}
}
- fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+ fuse->base = ioremap(regs.start, resource_size(&regs));
if (!fuse->base) {
pr_err("failed to map FUSE registers\n");
return -ENXIO;
@@ -338,6 +485,15 @@
pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ if (fuse->soc->lookups) {
+ size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
+
+ fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
+ if (!fuse->lookups)
+ return -ENOMEM;
+
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+ }
return 0;
}
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index d4aef9c..16aaa28 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -164,4 +164,5 @@
.speedo_init = tegra20_init_speedo_data,
.probe = tegra20_fuse_probe,
.info = &tegra20_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 15060c8..c1aa781 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -111,6 +112,7 @@
.init = tegra30_fuse_init,
.speedo_init = tegra30_init_speedo_data,
.info = &tegra30_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -125,10 +127,75 @@
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct nvmem_cell_lookup tegra124_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-realignment",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "realignment",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
static const struct tegra_fuse_info tegra124_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -139,10 +206,82 @@
.init = tegra30_fuse_init,
.speedo_init = tegra124_init_speedo_data,
.info = &tegra124_fuse_info,
+ .lookups = tegra124_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct nvmem_cell_lookup tegra210_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "sata-calibration",
+ .dev_id = "70020000.sata",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "gpu-calibration",
+ .dev_id = "57000000.gpu",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
static const struct tegra_fuse_info tegra210_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -153,10 +292,27 @@
.init = tegra30_fuse_init,
.speedo_init = tegra210_init_speedo_data,
.info = &tegra210_fuse_info,
+ .lookups = tegra210_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
#if defined(CONFIG_ARCH_TEGRA_186_SOC)
+static const struct nvmem_cell_lookup tegra186_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
static const struct tegra_fuse_info tegra186_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x300,
@@ -166,5 +322,68 @@
const struct tegra_fuse_soc tegra186_fuse_soc = {
.init = tegra30_fuse_init,
.info = &tegra186_fuse_info,
+ .lookups = tegra186_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+static const struct nvmem_cell_lookup tegra194_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct tegra_fuse_info tegra194_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra194_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra194_fuse_info,
+ .lookups = tegra194_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+ .soc_attr_group = &tegra194_soc_attr_group,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+static const struct nvmem_cell_lookup tegra234_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration-ext",
+ .dev_id = "3520000.padctl",
+ .con_id = "calibration-ext",
+ },
+};
+
+static const struct tegra_fuse_info tegra234_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x300,
+ .spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra234_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra234_fuse_info,
+ .lookups = tegra234_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra234_fuse_lookups),
+ .soc_attr_group = &tegra194_soc_attr_group,
};
#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 7230cb3..21887a5 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -13,6 +13,8 @@
#include <linux/dmaengine.h>
#include <linux/types.h>
+struct nvmem_cell_lookup;
+struct nvmem_device;
struct tegra_fuse;
struct tegra_fuse_info {
@@ -27,6 +29,11 @@
int (*probe)(struct tegra_fuse *fuse);
const struct tegra_fuse_info *info;
+
+ const struct nvmem_cell_lookup *lookups;
+ unsigned int num_lookups;
+
+ const struct attribute_group *soc_attr_group;
};
struct tegra_fuse {
@@ -48,14 +55,22 @@
dma_addr_t phys;
u32 *virt;
} apbdma;
+
+ struct nvmem_device *nvmem;
+ struct nvmem_cell_lookup *lookups;
};
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
+u8 tegra_get_major_rev(void);
+u8 tegra_get_minor_rev(void);
+
+extern const struct attribute_group tegra_soc_attr_group;
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
#endif
@@ -100,4 +115,17 @@
extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct attribute_group tegra194_soc_attr_group;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+extern const struct tegra_fuse_soc tegra194_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_234_SOC
+extern const struct tegra_fuse_soc tegra234_fuse_soc;
+#endif
+
#endif
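The major/minor attributes declared here (plus platform on Tegra194/234) surface in the SoC device's sysfs directory. A userspace sketch, assuming this SoC device registers as soc0:

#include <stdio.h>

static int read_soc_attr(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}

	return val;
}

int main(void)
{
	printf("Tegra revision: major %d, minor %d\n",
	       read_soc_attr("/sys/devices/soc0/major"),
	       read_soc_attr("/sys/devices/soc0/minor"));
	return 0;
}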
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index f8b9c40..cee207d 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,18 +21,15 @@
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
(0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
-static void __iomem *apbmisc_base;
-static void __iomem *strapping_base;
static bool long_ram_code;
+static u32 strapping;
+static u32 chipid;
u32 tegra_read_chipid(void)
{
- if (!apbmisc_base) {
- WARN(1, "Tegra Chip ID not yet available\n");
- return 0;
- }
+ WARN(!chipid, "Tegra APB MISC not yet available\n");
- return readl_relaxed(apbmisc_base + 4);
+ return chipid;
}
u8 tegra_get_chip_id(void)
@@ -40,12 +37,46 @@
return (tegra_read_chipid() >> 8) & 0xff;
}
+u8 tegra_get_major_rev(void)
+{
+ return (tegra_read_chipid() >> 4) & 0xf;
+}
+
+u8 tegra_get_minor_rev(void)
+{
+ return (tegra_read_chipid() >> 16) & 0xf;
+}
+
+u8 tegra_get_platform(void)
+{
+ return (tegra_read_chipid() >> 20) & 0xf;
+}
+
+bool tegra_is_silicon(void)
+{
+ switch (tegra_get_chip_id()) {
+ case TEGRA194:
+ case TEGRA234:
+ if (tegra_get_platform() == 0)
+ return true;
+
+ return false;
+ }
+
+ /*
+ * Chips prior to Tegra194 have a different way of determining whether
+ * they are silicon or not. Since we never supported simulation on the
+ * older Tegra chips, don't bother extracting the information and just
+ * report that we're running on silicon.
+ */
+ return true;
+}
+
u32 tegra_read_straps(void)
{
- if (strapping_base)
- return readl_relaxed(strapping_base);
- else
- return 0;
+ WARN(!chipid, "Tegra ABP MISC not yet available\n");
+
+ return strapping;
}
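Taken together, the accessors above imply the following HIDREV layout; a worked decode with a made-up register value (the chip ID constants live in include/soc/tegra/fuse.h):

/* HIDREV layout implied by the shifts above:
 *   [7:4]   major revision
 *   [15:8]  chip ID
 *   [19:16] minor revision
 *   [23:20] pre_si_platform (0 == silicon)
 */
static void __init example_decode_hidrev(void)
{
	u32 hidrev = 0x00011910;		/* made-up sample value */
	u8 major = (hidrev >> 4) & 0xf;		/* 0x1 */
	u8 chip  = (hidrev >> 8) & 0xff;	/* 0x19 == TEGRA194 */
	u8 minor = (hidrev >> 16) & 0xf;	/* 0x1 */
	u8 plat  = (hidrev >> 20) & 0xf;	/* 0x0 -> silicon */

	pr_debug("chip %#x rev A%u%u platform %u\n", chip, major, minor, plat);
}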
u32 tegra_read_ram_code(void)
@@ -63,46 +94,45 @@
static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{ .compatible = "nvidia,tegra186-misc", },
+ { .compatible = "nvidia,tegra194-misc", },
+ { .compatible = "nvidia,tegra234-misc", },
{},
};
void __init tegra_init_revision(void)
{
- u32 id, chip_id, minor_rev;
- int rev;
+ u8 chip_id, minor_rev;
- id = tegra_read_chipid();
- chip_id = (id >> 8) & 0xff;
- minor_rev = (id >> 16) & 0xf;
+ chip_id = tegra_get_chip_id();
+ minor_rev = tegra_get_minor_rev();
switch (minor_rev) {
case 1:
- rev = TEGRA_REVISION_A01;
+ tegra_sku_info.revision = TEGRA_REVISION_A01;
break;
case 2:
- rev = TEGRA_REVISION_A02;
+ tegra_sku_info.revision = TEGRA_REVISION_A02;
break;
case 3:
if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
tegra_fuse_read_spare(19)))
- rev = TEGRA_REVISION_A03p;
+ tegra_sku_info.revision = TEGRA_REVISION_A03p;
else
- rev = TEGRA_REVISION_A03;
+ tegra_sku_info.revision = TEGRA_REVISION_A03;
break;
case 4:
- rev = TEGRA_REVISION_A04;
+ tegra_sku_info.revision = TEGRA_REVISION_A04;
break;
default:
- rev = TEGRA_REVISION_UNKNOWN;
+ tegra_sku_info.revision = TEGRA_REVISION_UNKNOWN;
}
- tegra_sku_info.revision = rev;
-
tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
}
void __init tegra_init_apbmisc(void)
{
+ void __iomem *apbmisc_base, *strapping_base;
struct resource apbmisc, straps;
struct device_node *np;
@@ -159,13 +189,21 @@
}
}
- apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
- if (!apbmisc_base)
+ apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
+ if (!apbmisc_base) {
pr_err("failed to map APBMISC registers\n");
+ } else {
+ chipid = readl_relaxed(apbmisc_base + 4);
+ iounmap(apbmisc_base);
+ }
- strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
- if (!strapping_base)
+ strapping_base = ioremap(straps.start, resource_size(&straps));
+ if (!strapping_base) {
pr_err("failed to map strapping options registers\n");
+ } else {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ }
long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 0447afa..5726c23 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -13,9 +13,13 @@
#include <linux/arm-smccc.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/clk-conf.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -48,6 +52,7 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h>
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+#include <dt-bindings/soc/tegra-pmc.h>
#define PMC_CNTRL 0x0
#define PMC_CNTRL_INTR_POLARITY BIT(17) /* inverts INTR polarity */
@@ -56,8 +61,17 @@
#define PMC_CNTRL_SIDE_EFFECT_LP0 BIT(14) /* LP0 when CPU pwr gated */
#define PMC_CNTRL_SYSCLK_OE BIT(11) /* system clock enable */
#define PMC_CNTRL_SYSCLK_POLARITY BIT(10) /* sys clk polarity */
+#define PMC_CNTRL_PWRREQ_POLARITY BIT(8)
+#define PMC_CNTRL_BLINK_EN 7
#define PMC_CNTRL_MAIN_RST BIT(4)
+#define PMC_WAKE_MASK 0x0c
+#define PMC_WAKE_LEVEL 0x10
+#define PMC_WAKE_STATUS 0x14
+#define PMC_SW_WAKE_STATUS 0x18
+#define PMC_DPD_PADS_ORIDE 0x1c
+#define PMC_DPD_PADS_ORIDE_BLINK 20
+
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE BIT(0)
#define DPD_SAMPLE_DISABLE (0 << 0)
@@ -69,6 +83,7 @@
#define PWRGATE_STATUS 0x38
+#define PMC_BLINK_TIMER 0x40
#define PMC_IMPL_E_33V_PWR 0x40
#define PMC_PWR_DET 0x48
@@ -82,11 +97,20 @@
#define PMC_CPUPWRGOOD_TIMER 0xc8
#define PMC_CPUPWROFF_TIMER 0xcc
+#define PMC_COREPWRGOOD_TIMER 0x3c
+#define PMC_COREPWROFF_TIMER 0xe0
#define PMC_PWR_DET_VALUE 0xe4
#define PMC_SCRATCH41 0x140
+#define PMC_WAKE2_MASK 0x160
+#define PMC_WAKE2_LEVEL 0x164
+#define PMC_WAKE2_STATUS 0x168
+#define PMC_SW_WAKE2_STATUS 0x16c
+
+#define PMC_CLK_OUT_CNTRL 0x1a8
+#define PMC_CLK_OUT_MUX_MASK GENMASK(1, 0)
#define PMC_SENSOR_CTRL 0x1b0
#define PMC_SENSOR_CTRL_SCRATCH_WRITE BIT(2)
#define PMC_SENSOR_CTRL_ENABLE_RST BIT(1)
@@ -142,6 +166,71 @@
#define TEGRA_SMC_PMC_READ 0xaa
#define TEGRA_SMC_PMC_WRITE 0xbb
+struct pmc_clk {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 mux_shift;
+ u32 force_en_shift;
+};
+
+#define to_pmc_clk(_hw) container_of(_hw, struct pmc_clk, hw)
+
+struct pmc_clk_gate {
+ struct clk_hw hw;
+ unsigned long offs;
+ u32 shift;
+};
+
+#define to_pmc_clk_gate(_hw) container_of(_hw, struct pmc_clk_gate, hw)
+
+struct pmc_clk_init_data {
+ char *name;
+ const char *const *parents;
+ int num_parents;
+ int clk_id;
+ u8 mux_shift;
+ u8 force_en_shift;
+};
+
+static const char * const clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
+};
+
+static const char * const clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
+};
+
+static const char * const clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
+};
+
+static const struct pmc_clk_init_data tegra_pmc_clks_data[] = {
+ {
+ .name = "pmc_clk_out_1",
+ .parents = clk_out1_parents,
+ .num_parents = ARRAY_SIZE(clk_out1_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_1,
+ .mux_shift = 6,
+ .force_en_shift = 2,
+ },
+ {
+ .name = "pmc_clk_out_2",
+ .parents = clk_out2_parents,
+ .num_parents = ARRAY_SIZE(clk_out2_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_2,
+ .mux_shift = 14,
+ .force_en_shift = 10,
+ },
+ {
+ .name = "pmc_clk_out_3",
+ .parents = clk_out3_parents,
+ .num_parents = ARRAY_SIZE(clk_out3_parents),
+ .clk_id = TEGRA_PMC_CLK_OUT_3,
+ .mux_shift = 22,
+ .force_en_shift = 18,
+ },
+};
+
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -226,6 +315,10 @@
void (*setup_irq_polarity)(struct tegra_pmc *pmc,
struct device_node *np,
bool invert);
+ int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+ int (*irq_set_type)(struct irq_data *data, unsigned int type);
+ int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state);
const char * const *reset_sources;
unsigned int num_reset_sources;
@@ -239,45 +332,10 @@
*/
const struct tegra_wake_event *wake_events;
unsigned int num_wake_events;
-};
-static const char * const tegra186_reset_sources[] = {
- "SYS_RESET",
- "AOWDT",
- "MCCPLEXWDT",
- "BPMPWDT",
- "SCEWDT",
- "SPEWDT",
- "APEWDT",
- "BCCPLEXWDT",
- "SENSOR",
- "AOTAG",
- "VFSENSOR",
- "SWREST",
- "SC7",
- "HSM",
- "CORESIGHT"
-};
-
-static const char * const tegra186_reset_levels[] = {
- "L0", "L1", "L2", "WARM"
-};
-
-static const char * const tegra30_reset_sources[] = {
- "POWER_ON_RESET",
- "WATCHDOG",
- "SENSOR",
- "SW_MAIN",
- "LP0"
-};
-
-static const char * const tegra210_reset_sources[] = {
- "POWER_ON_RESET",
- "WATCHDOG",
- "SENSOR",
- "SW_MAIN",
- "LP0",
- "AOTAG"
+ const struct pmc_clk_init_data *pmc_clks_data;
+ unsigned int num_pmc_clks;
+ bool has_blink_output;
};
/**
@@ -309,6 +367,7 @@
* @pctl_dev: pin controller exposed by the PMC
* @domain: IRQ domain provided by the PMC
* @irq: chip implementation for the IRQ domain
+ * @clk_nb: pclk clock changes handler
*/
struct tegra_pmc {
struct device *dev;
@@ -344,6 +403,8 @@
struct irq_domain *domain;
struct irq_chip irq;
+
+ struct notifier_block clk_nb;
};
static struct tegra_pmc *pmc = &(struct tegra_pmc) {
@@ -458,6 +519,63 @@
return -ENODEV;
}
+static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ unsigned int retries = 100;
+ bool status;
+ int ret;
+
+ /*
+ * As per TRM documentation, the toggle command will be dropped by PMC
+ * if there is contention with a HW-initiated toggling (i.e. CPU core
+ * power-gated), the command should be retried in that case.
+ */
+ do {
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to execute the command */
+ ret = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 1, 10);
+ } while (ret == -ETIMEDOUT && retries--);
+
+ return ret;
+}
+
+static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
+{
+ return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
+}
+
+static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
+{
+ bool status;
+ int err;
+
+ /* wait while PMC power gating is contended */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ /* wait for PMC to accept the command */
+ err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
+ status == true, 1, 100);
+ if (err)
+ return err;
+
+ /* wait for PMC to execute the command */
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 10, 100000);
+ if (err)
+ return err;
+
+ return 0;
+}
+
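Both helpers above build on readx_poll_timeout() from <linux/iopoll.h>. A simplified model of the call in tegra20_powergate_set(), assuming the sketch name below (the real macro schedules the sleep slightly differently and is generic over the read op):

/* Rough equivalent of:
 *   readx_poll_timeout(tegra_powergate_state, id, status,
 *                      status == new_state, 1, 10);
 */
static int powergate_poll_sketch(unsigned int id, bool new_state)
{
	ktime_t deadline = ktime_add_us(ktime_get(), 10);	/* timeout_us */
	bool status;

	for (;;) {
		status = tegra_powergate_state(id);
		if (status == new_state)
			return 0;
		if (ktime_after(ktime_get(), deadline))
			/* one final re-read before giving up */
			return tegra_powergate_state(id) == new_state ?
			       0 : -ETIMEDOUT;
		usleep_range(1, 2);				/* ~sleep_us */
	}
}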
/**
* tegra_powergate_set() - set the state of a partition
* @pmc: power management controller
@@ -467,7 +585,6 @@
static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
bool new_state)
{
- bool status;
int err;
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
@@ -480,10 +597,7 @@
return 0;
}
- tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
-
- err = readx_poll_timeout(tegra_powergate_state, id, status,
- status == new_state, 10, 100000);
+ err = pmc->soc->powergate_set(pmc, id, new_state);
mutex_unlock(&pmc->powergates_lock);
@@ -579,7 +693,7 @@
err = tegra_powergate_enable_clocks(pg);
if (err)
- goto disable_clks;
+ goto powergate_off;
usleep_range(10, 20);
@@ -591,7 +705,7 @@
err = reset_control_deassert(pg->reset);
if (err)
- goto powergate_off;
+ goto disable_clks;
usleep_range(10, 20);
@@ -1192,7 +1306,7 @@
return err;
if (pmc->clk) {
- rate = clk_get_rate(pmc->clk);
+ rate = pmc->rate;
if (!rate) {
dev_err(pmc->dev, "failed to get clock rate\n");
return -ENODEV;
@@ -1433,6 +1547,7 @@
void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
{
unsigned long long rate = 0;
+ u64 ticks;
u32 value;
switch (mode) {
@@ -1441,7 +1556,7 @@
break;
case TEGRA_SUSPEND_LP2:
- rate = clk_get_rate(pmc->clk);
+ rate = pmc->rate;
break;
default:
@@ -1451,21 +1566,13 @@
if (WARN_ON_ONCE(rate == 0))
rate = 100000000;
- if (rate != pmc->rate) {
- u64 ticks;
+ ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
- ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
- do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
-
- ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
- do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
-
- wmb();
-
- pmc->rate = rate;
- }
+ ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
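The dropped rate-caching branch leaves the tick conversion unconditional; the arithmetic itself is a microseconds-to-pclk-ticks round-up. A fragment with illustrative numbers (204 MHz pclk, cpu_good_time of 2000 us, both made up):

	/* ticks = DIV_ROUND_UP(time_us * rate, USEC_PER_SEC) */
	u64 rate = 204000000;		/* hypothetical cached pclk rate */
	u64 ticks = 2000ULL * rate + USEC_PER_SEC - 1;

	do_div(ticks, USEC_PER_SEC);	/* 2000 * 204e6 / 1e6 = 408000 ticks */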
@@ -1899,44 +2006,17 @@
event->id,
&pmc->irq, pmc);
- /*
- * GPIOs don't have an equivalent interrupt in the
- * parent controller (GIC). However some code, such
- * as the one in irq_get_irqchip_state(), require a
- * valid IRQ chip to be set. Make sure that's the
- * case by passing NULL here, which will install a
- * dummy IRQ chip for the interrupt in the parent
- * domain.
- */
- if (domain->parent)
- irq_domain_set_hwirq_and_chip(domain->parent,
- virq, 0, NULL,
- NULL);
-
+ /* GPIO hierarchies stop at the PMC level */
+ if (!err && domain->parent)
+ err = irq_domain_disconnect_hierarchy(domain->parent,
+ virq);
break;
}
}
- /*
- * For interrupts that don't have associated wake events, assign a
- * dummy hardware IRQ number. This is used in the ->irq_set_type()
- * and ->irq_set_wake() callbacks to return early for these IRQs.
- */
- if (i == soc->num_wake_events) {
- err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
- &pmc->irq, pmc);
-
- /*
- * Interrupts without a wake event don't have a corresponding
- * interrupt in the parent controller (GIC). Pass NULL for the
- * chip here, which causes a dummy IRQ chip to be installed
- * for the interrupt in the parent domain, to make this
- * explicit.
- */
- if (domain->parent)
- irq_domain_set_hwirq_and_chip(domain->parent, virq, 0,
- NULL, NULL);
- }
+ /* If there is no wake-up event, there is no PMC mapping */
+ if (i == soc->num_wake_events)
+ err = irq_domain_disconnect_hierarchy(domain, virq);
return err;
}
@@ -1946,15 +2026,85 @@
.alloc = tegra_pmc_irq_alloc,
};
-static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+static int tegra210_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
unsigned int offset, bit;
u32 value;
- /* nothing to do if there's no associated wake event */
- if (WARN_ON(data->hwirq == ULONG_MAX))
- return 0;
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ /* clear wake status */
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_SW_WAKE2_STATUS);
+
+ tegra_pmc_writel(pmc, 0, PMC_WAKE_STATUS);
+ tegra_pmc_writel(pmc, 0, PMC_WAKE2_STATUS);
+
+ /* enable PMC wake */
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_MASK;
+ else
+ offset = PMC_WAKE_MASK;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ if (on)
+ value |= BIT(bit);
+ else
+ value &= ~BIT(bit);
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
+
+static int tegra210_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
+
+ offset = data->hwirq / 32;
+ bit = data->hwirq % 32;
+
+ if (data->hwirq >= 32)
+ offset = PMC_WAKE2_LEVEL;
+ else
+ offset = PMC_WAKE_LEVEL;
+
+ value = tegra_pmc_readl(pmc, offset);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ value &= ~BIT(bit);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING:
+ value ^= BIT(bit);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tegra_pmc_writel(pmc, value, offset);
+
+ return 0;
+}
+
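The Tegra210 helpers above split the wake inputs across two 32-bit register banks. Using the wake events this patch defines further down ("rtc" = 16, "pmu" = 51) as worked examples:

	/* hwirq 16 ("rtc"): 16 <  32 -> PMC_WAKE_MASK / PMC_WAKE_LEVEL,  bit 16 */
	/* hwirq 51 ("pmu"): 51 >= 32 -> PMC_WAKE2_MASK / PMC_WAKE2_LEVEL, bit 19 */
	unsigned int hwirq = 51;
	unsigned int bit = hwirq % 32;					/* 19 */
	unsigned int offset = hwirq >= 32 ? PMC_WAKE2_MASK : PMC_WAKE_MASK;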
+static int tegra186_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
+ unsigned int offset, bit;
+ u32 value;
offset = data->hwirq / 32;
bit = data->hwirq % 32;
@@ -1978,15 +2128,11 @@
return 0;
}
-static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type)
+static int tegra186_pmc_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
u32 value;
- /* nothing to do if there's no associated wake event */
- if (data->hwirq == ULONG_MAX)
- return 0;
-
value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
switch (type) {
@@ -2013,6 +2159,34 @@
return 0;
}
+static void tegra_irq_mask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_mask_parent(data);
+}
+
+static void tegra_irq_unmask_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_unmask_parent(data);
+}
+
+static void tegra_irq_eoi_parent(struct irq_data *data)
+{
+ if (data->parent_data)
+ irq_chip_eoi_parent(data);
+}
+
+static int tegra_irq_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
{
struct irq_domain *parent = NULL;
@@ -2028,12 +2202,12 @@
return 0;
pmc->irq.name = dev_name(pmc->dev);
- pmc->irq.irq_mask = irq_chip_mask_parent;
- pmc->irq.irq_unmask = irq_chip_unmask_parent;
- pmc->irq.irq_eoi = irq_chip_eoi_parent;
- pmc->irq.irq_set_affinity = irq_chip_set_affinity_parent;
- pmc->irq.irq_set_type = tegra_pmc_irq_set_type;
- pmc->irq.irq_set_wake = tegra_pmc_irq_set_wake;
+ pmc->irq.irq_mask = tegra_irq_mask_parent;
+ pmc->irq.irq_unmask = tegra_irq_unmask_parent;
+ pmc->irq.irq_eoi = tegra_irq_eoi_parent;
+ pmc->irq.irq_set_affinity = tegra_irq_set_affinity_parent;
+ pmc->irq.irq_set_type = pmc->soc->irq_set_type;
+ pmc->irq.irq_set_wake = pmc->soc->irq_set_wake;
pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
&tegra_pmc_irq_domain_ops, pmc);
@@ -2045,6 +2219,285 @@
return 0;
}
+static int tegra_pmc_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct tegra_pmc *pmc = container_of(nb, struct tegra_pmc, clk_nb);
+ struct clk_notifier_data *data = ptr;
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&pmc->powergates_lock);
+ break;
+
+ case POST_RATE_CHANGE:
+ pmc->rate = data->new_rate;
+ fallthrough;
+
+ case ABORT_RATE_CHANGE:
+ mutex_unlock(&pmc->powergates_lock);
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ return notifier_from_errno(-EINVAL);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void pmc_clk_fence_udelay(u32 offset)
+{
+ tegra_pmc_readl(pmc, offset);
+ /* pmc clk propagation delay 2 us */
+ udelay(2);
+}
+
+static u8 pmc_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) >> clk->mux_shift;
+ val &= PMC_CLK_OUT_MUX_MASK;
+
+ return val;
+}
+
+static int pmc_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs);
+ val &= ~(PMC_CLK_OUT_MUX_MASK << clk->mux_shift);
+ val |= index << clk->mux_shift;
+ tegra_pmc_writel(pmc, val, clk->offs);
+ pmc_clk_fence_udelay(clk->offs);
+
+ return 0;
+}
+
+static int pmc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, clk->offs) & BIT(clk->force_en_shift);
+
+ return val ? 1 : 0;
+}
+
+static void pmc_clk_set_state(unsigned long offs, u32 shift, int state)
+{
+ u32 val;
+
+ val = tegra_pmc_readl(pmc, offs);
+ val = state ? (val | BIT(shift)) : (val & ~BIT(shift));
+ tegra_pmc_writel(pmc, val, offs);
+ pmc_clk_fence_udelay(offs);
+}
+
+static int pmc_clk_enable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_disable(struct clk_hw *hw)
+{
+ struct pmc_clk *clk = to_pmc_clk(hw);
+
+ pmc_clk_set_state(clk->offs, clk->force_en_shift, 0);
+}
+
+static const struct clk_ops pmc_clk_ops = {
+ .get_parent = pmc_clk_mux_get_parent,
+ .set_parent = pmc_clk_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+ .is_enabled = pmc_clk_is_enabled,
+ .enable = pmc_clk_enable,
+ .disable = pmc_clk_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_out_register(struct tegra_pmc *pmc,
+ const struct pmc_clk_init_data *data,
+ unsigned long offset)
+{
+ struct clk_init_data init;
+ struct pmc_clk *pmc_clk;
+
+ pmc_clk = devm_kzalloc(pmc->dev, sizeof(*pmc_clk), GFP_KERNEL);
+ if (!pmc_clk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = data->name;
+ init.ops = &pmc_clk_ops;
+ init.parent_names = data->parents;
+ init.num_parents = data->num_parents;
+ init.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT |
+ CLK_SET_PARENT_GATE;
+
+ pmc_clk->hw.init = &init;
+ pmc_clk->offs = offset;
+ pmc_clk->mux_shift = data->mux_shift;
+ pmc_clk->force_en_shift = data->force_en_shift;
+
+ return clk_register(NULL, &pmc_clk->hw);
+}
+
+static int pmc_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ return tegra_pmc_readl(pmc, gate->offs) & BIT(gate->shift) ? 1 : 0;
+}
+
+static int pmc_clk_gate_enable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 1);
+
+ return 0;
+}
+
+static void pmc_clk_gate_disable(struct clk_hw *hw)
+{
+ struct pmc_clk_gate *gate = to_pmc_clk_gate(hw);
+
+ pmc_clk_set_state(gate->offs, gate->shift, 0);
+}
+
+static const struct clk_ops pmc_clk_gate_ops = {
+ .is_enabled = pmc_clk_gate_is_enabled,
+ .enable = pmc_clk_gate_enable,
+ .disable = pmc_clk_gate_disable,
+};
+
+static struct clk *
+tegra_pmc_clk_gate_register(struct tegra_pmc *pmc, const char *name,
+ const char *parent_name, unsigned long offset,
+ u32 shift)
+{
+ struct clk_init_data init;
+ struct pmc_clk_gate *gate;
+
+ gate = devm_kzalloc(pmc->dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pmc_clk_gate_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ gate->hw.init = &init;
+ gate->offs = offset;
+ gate->shift = shift;
+
+ return clk_register(NULL, &gate->hw);
+}
+
+static void tegra_pmc_clock_register(struct tegra_pmc *pmc,
+ struct device_node *np)
+{
+ struct clk *clk;
+ struct clk_onecell_data *clk_data;
+ unsigned int num_clks;
+ int i, err;
+
+ num_clks = pmc->soc->num_pmc_clks;
+ if (pmc->soc->has_blink_output)
+ num_clks += 1;
+
+ if (!num_clks)
+ return;
+
+ clk_data = devm_kmalloc(pmc->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->clks = devm_kcalloc(pmc->dev, TEGRA_PMC_CLK_MAX,
+ sizeof(*clk_data->clks), GFP_KERNEL);
+ if (!clk_data->clks)
+ return;
+
+ clk_data->clk_num = TEGRA_PMC_CLK_MAX;
+
+ for (i = 0; i < TEGRA_PMC_CLK_MAX; i++)
+ clk_data->clks[i] = ERR_PTR(-ENOENT);
+
+ for (i = 0; i < pmc->soc->num_pmc_clks; i++) {
+ const struct pmc_clk_init_data *data;
+
+ data = pmc->soc->pmc_clks_data + i;
+
+ clk = tegra_pmc_clk_out_register(pmc, data, PMC_CLK_OUT_CNTRL);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev, "unable to register clock %s: %d\n",
+ data->name, PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, data->name, NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register %s clock lookup: %d\n",
+ data->name, err);
+ return;
+ }
+
+ clk_data->clks[data->clk_id] = clk;
+ }
+
+ if (pmc->soc->has_blink_output) {
+ tegra_pmc_writel(pmc, 0x0, PMC_BLINK_TIMER);
+ clk = tegra_pmc_clk_gate_register(pmc,
+ "pmc_blink_override",
+ "clk_32k",
+ PMC_DPD_PADS_ORIDE,
+ PMC_DPD_PADS_ORIDE_BLINK);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink_override: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ clk = tegra_pmc_clk_gate_register(pmc, "pmc_blink",
+ "pmc_blink_override",
+ PMC_CNTRL,
+ PMC_CNTRL_BLINK_EN);
+ if (IS_ERR(clk)) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink: %d\n",
+ PTR_ERR_OR_ZERO(clk));
+ return;
+ }
+
+ err = clk_register_clkdev(clk, "pmc_blink", NULL);
+ if (err) {
+ dev_warn(pmc->dev,
+ "unable to register pmc_blink lookup: %d\n",
+ err);
+ return;
+ }
+
+ clk_data->clks[TEGRA_PMC_CLK_BLINK] = clk;
+ }
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+ if (err)
+ dev_warn(pmc->dev, "failed to add pmc clock provider: %d\n",
+ err);
+}
+
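Because each output clock also receives a clkdev lookup keyed on its name, a board-level consumer can claim one without devicetree wiring. A sketch, assuming the "extern1" parent also resolves through clkdev on the target board:

static int example_enable_clk_out_1(void)
{
	struct clk *clk, *parent;
	int err;

	clk = clk_get(NULL, "pmc_clk_out_1");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* pick one of clk_out1_parents via the mux, before enabling */
	parent = clk_get(NULL, "extern1");
	if (!IS_ERR(parent)) {
		clk_set_parent(clk, parent);
		clk_put(parent);
	}

	err = clk_prepare_enable(clk);	/* sets the force-enable bit */
	if (err)
		clk_put(clk);

	return err;
}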
static int tegra_pmc_probe(struct platform_device *pdev)
{
void __iomem *base;
@@ -2108,6 +2561,23 @@
pmc->clk = NULL;
}
+ /*
+ * The PCLK clock rate can't be retrieved using the CLK API because
+ * doing so causes a lockup if the CPU enters the LP2 idle state from
+ * another CLK notifier, hence we cache the rate locally.
+ */
+ if (pmc->clk) {
+ pmc->clk_nb.notifier_call = tegra_pmc_clk_notify_cb;
+ err = clk_notifier_register(pmc->clk, &pmc->clk_nb);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to register clk notifier\n");
+ return err;
+ }
+
+ pmc->rate = clk_get_rate(pmc->clk);
+ }
+
pmc->dev = &pdev->dev;
tegra_pmc_init(pmc);
@@ -2146,6 +2616,7 @@
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ tegra_pmc_clock_register(pmc, pdev->dev.of_node);
platform_set_drvdata(pdev, pmc);
return 0;
@@ -2159,6 +2630,8 @@
cleanup_sysfs:
device_remove_file(&pdev->dev, &dev_attr_reset_reason);
device_remove_file(&pdev->dev, &dev_attr_reset_level);
+ clk_notifier_unregister(pmc->clk, &pmc->clk_nb);
+
return err;
}
@@ -2210,7 +2683,7 @@
static void tegra20_pmc_init(struct tegra_pmc *pmc)
{
- u32 value;
+ u32 value, osc, pmu, off;
/* Always enable CPU power request */
value = tegra_pmc_readl(pmc, PMC_CNTRL);
@@ -2224,6 +2697,11 @@
else
value |= PMC_CNTRL_SYSCLK_POLARITY;
+ if (pmc->corereq_high)
+ value &= ~PMC_CNTRL_PWRREQ_POLARITY;
+ else
+ value |= PMC_CNTRL_PWRREQ_POLARITY;
+
/* configure the output polarity while the request is tristated */
tegra_pmc_writel(pmc, value, PMC_CNTRL);
@@ -2231,6 +2709,16 @@
value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_SYSCLK_OE;
tegra_pmc_writel(pmc, value, PMC_CNTRL);
+
+ /* program core timings which are applicable only for suspend state */
+ if (pmc->suspend_mode != TEGRA_SUSPEND_NONE) {
+ osc = DIV_ROUND_UP(pmc->core_osc_time * 8192, 1000000);
+ pmu = DIV_ROUND_UP(pmc->core_pmu_time * 32768, 1000000);
+ off = DIV_ROUND_UP(pmc->core_off_time * 32768, 1000000);
+ tegra_pmc_writel(pmc, ((osc << 8) & 0xff00) | (pmu & 0xff),
+ PMC_COREPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, off, PMC_COREPWROFF_TIMER);
+ }
}
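The new core timing block converts devicetree microsecond values into ticks of the 8.192 kHz and 32.768 kHz timer clocks. A worked example with made-up timings:

/* core_osc_time = 1000 us -> DIV_ROUND_UP(1000 * 8192,  1000000) =  9
 * core_pmu_time = 2000 us -> DIV_ROUND_UP(2000 * 32768, 1000000) = 66
 * core_off_time = 3000 us -> DIV_ROUND_UP(3000 * 32768, 1000000) = 99
 *
 * PMC_COREPWRGOOD_TIMER = ((9 << 8) & 0xff00) | (66 & 0xff) = 0x0942
 * PMC_COREPWROFF_TIMER  = 99
 */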
static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2266,10 +2754,14 @@
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
.reset_sources = NULL,
.num_reset_sources = 0,
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = true,
};
static const char * const tegra30_powergates[] = {
@@ -2296,6 +2788,14 @@
TEGRA_POWERGATE_CPU3,
};
+static const char * const tegra30_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0"
+};
+
static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra30_powergates),
.powergates = tegra30_powergates,
@@ -2313,10 +2813,14 @@
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra20_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra114_powergates[] = {
@@ -2364,10 +2868,14 @@
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra124_powergates[] = {
@@ -2417,38 +2925,38 @@
.name = (_name) \
})
-#define TEGRA124_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
- _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
+#define TEGRA124_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_BB, 15, UINT_MAX, "bb"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_COMP, 22, UINT_MAX, "comp"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csb"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "cse"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_HV, 38, UINT_MAX, "hv"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_NAND, 13, UINT_MAX, "nand"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, UINT_MAX, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, UINT_MAX, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 35, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_SYS_DDC, 58, UINT_MAX, "sys_ddc"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb_bias")
static const struct tegra_io_pad_soc tegra124_io_pads[] = {
TEGRA124_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2475,10 +2983,14 @@
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
.reset_sources = tegra30_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
static const char * const tegra210_powergates[] = {
@@ -2515,46 +3027,46 @@
TEGRA_POWERGATE_CPU3,
};
-#define TEGRA210_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
- _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
- _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
- _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
- _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
- _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
- _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
- _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
- _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
- _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
- _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
- _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
+#define TEGRA210_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, 5, "audio"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 18, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_CAM, 36, 10, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_CSIC, 42, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 43, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 44, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 45, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, 19, "dbg"), \
+ _pad(TEGRA_IO_PAD_DEBUG_NONAO, 26, UINT_MAX, "debug-nonao"), \
+ _pad(TEGRA_IO_PAD_DMIC, 50, 20, "dmic"), \
+ _pad(TEGRA_IO_PAD_DP, 51, UINT_MAX, "dp"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_DSIB, 39, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 40, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 41, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_EMMC, 35, UINT_MAX, "emmc"), \
+ _pad(TEGRA_IO_PAD_EMMC2, 37, UINT_MAX, "emmc2"), \
+ _pad(TEGRA_IO_PAD_GPIO, 27, 21, "gpio"), \
+ _pad(TEGRA_IO_PAD_HDMI, 28, UINT_MAX, "hdmi"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_LVDS, 57, UINT_MAX, "lvds"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_BIAS, 4, UINT_MAX, "pex-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 5, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, UINT_MAX, 11, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC1, 33, 12, "sdmmc1"), \
+ _pad(TEGRA_IO_PAD_SDMMC3, 34, 13, "sdmmc3"), \
+ _pad(TEGRA_IO_PAD_SPI, 46, 22, "spi"), \
+ _pad(TEGRA_IO_PAD_SPI_HV, 47, 23, "spi-hv"), \
+ _pad(TEGRA_IO_PAD_UART, 14, 2, "uart"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB3, 18, UINT_MAX, "usb3"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias")
static const struct tegra_io_pad_soc tegra210_io_pads[] = {
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2564,6 +3076,20 @@
TEGRA210_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
};
+static const char * const tegra210_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
+ "LP0",
+ "AOTAG"
+};
+
+static const struct tegra_wake_event tegra210_wake_events[] = {
+ TEGRA_WAKE_IRQ("rtc", 16, 2),
+ TEGRA_WAKE_IRQ("pmu", 51, 86),
+};
+
static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
@@ -2581,52 +3107,60 @@
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
+ .powergate_set = tegra114_powergate_set,
+ .irq_set_wake = tegra210_pmc_irq_set_wake,
+ .irq_set_type = tegra210_pmc_irq_set_type,
.reset_sources = tegra210_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra210_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
+ .num_wake_events = ARRAY_SIZE(tegra210_wake_events),
+ .wake_events = tegra210_wake_events,
+ .pmc_clks_data = tegra_pmc_clks_data,
+ .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
+ .has_blink_output = true,
};
-#define TEGRA186_IO_PAD_TABLE(_pad) \
- /* .id .dpd .voltage .name */ \
- _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
- _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
- _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
- _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
- _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
- _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
- _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
- _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
- _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
- _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
- _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
- _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
- _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
- _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
- _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
- _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
- _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
- _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
- _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
- _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
- _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
- _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
- _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
- _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
- _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
- _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
- _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
- _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
- _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
- _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
- _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
- _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
- _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
- _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
- _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
- _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+#define TEGRA186_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_DSI, 2, UINT_MAX, "dsi"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_USB0, 9, UINT_MAX, "usb0"), \
+ _pad(TEGRA_IO_PAD_USB1, 10, UINT_MAX, "usb1"), \
+ _pad(TEGRA_IO_PAD_USB2, 11, UINT_MAX, "usb2"), \
+ _pad(TEGRA_IO_PAD_USB_BIAS, 12, UINT_MAX, "usb-bias"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_HSIC, 19, UINT_MAX, "hsic"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_SDMMC2_HV, 34, 5, "sdmmc2-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_DSIB, 40, UINT_MAX, "dsib"), \
+ _pad(TEGRA_IO_PAD_DSIC, 41, UINT_MAX, "dsic"), \
+ _pad(TEGRA_IO_PAD_DSID, 42, UINT_MAX, "dsid"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_DMIC_HV, 52, 2, "dmic-hv"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
TEGRA186_IO_PAD_TABLE(TEGRA_IO_PAD)
@@ -2644,7 +3178,7 @@
.dpd2_status = 0x80,
.rst_status = 0x70,
.rst_source_shift = 0x2,
- .rst_source_mask = 0x3C,
+ .rst_source_mask = 0x3c,
.rst_level_shift = 0x0,
.rst_level_mask = 0x3,
};
@@ -2666,7 +3200,7 @@
of_address_to_resource(np, index, &regs);
- wake = ioremap_nocache(regs.start, resource_size(&regs));
+ wake = ioremap(regs.start, resource_size(&regs));
if (!wake) {
dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
@@ -2684,7 +3218,30 @@
iounmap(wake);
}
+static const char * const tegra186_reset_sources[] = {
+ "SYS_RESET",
+ "AOWDT",
+ "MCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "BCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "SWREST",
+ "SC7",
+ "HSM",
+ "CORESIGHT"
+};
+
+static const char * const tegra186_reset_levels[] = {
+ "L0", "L1", "L2", "WARM"
+};
+
static const struct tegra_wake_event tegra186_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -2706,65 +3263,118 @@
.regs = &tegra186_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
.reset_sources = tegra186_reset_sources,
.num_reset_sources = ARRAY_SIZE(tegra186_reset_sources),
.reset_levels = tegra186_reset_levels,
.num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra186_wake_events),
.wake_events = tegra186_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
+#define TEGRA194_IO_PAD_TABLE(_pad) \
+ /* .id .dpd .voltage .name */ \
+ _pad(TEGRA_IO_PAD_CSIA, 0, UINT_MAX, "csia"), \
+ _pad(TEGRA_IO_PAD_CSIB, 1, UINT_MAX, "csib"), \
+ _pad(TEGRA_IO_PAD_MIPI_BIAS, 3, UINT_MAX, "mipi-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_BIAS, 4, UINT_MAX, "pex-clk-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK3, 5, UINT_MAX, "pex-clk3"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK2, 6, UINT_MAX, "pex-clk2"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK1, 7, UINT_MAX, "pex-clk1"), \
+ _pad(TEGRA_IO_PAD_EQOS, 8, UINT_MAX, "eqos"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2_BIAS, 9, UINT_MAX, "pex-clk-2-bias"), \
+ _pad(TEGRA_IO_PAD_PEX_CLK_2, 10, UINT_MAX, "pex-clk-2"), \
+ _pad(TEGRA_IO_PAD_DAP3, 11, UINT_MAX, "dap3"), \
+ _pad(TEGRA_IO_PAD_DAP5, 12, UINT_MAX, "dap5"), \
+ _pad(TEGRA_IO_PAD_UART, 14, UINT_MAX, "uart"), \
+ _pad(TEGRA_IO_PAD_PWR_CTL, 15, UINT_MAX, "pwr-ctl"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO53, 16, UINT_MAX, "soc-gpio53"), \
+ _pad(TEGRA_IO_PAD_AUDIO, 17, UINT_MAX, "audio"), \
+ _pad(TEGRA_IO_PAD_GP_PWM2, 18, UINT_MAX, "gp-pwm2"), \
+ _pad(TEGRA_IO_PAD_GP_PWM3, 19, UINT_MAX, "gp-pwm3"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO12, 20, UINT_MAX, "soc-gpio12"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO13, 21, UINT_MAX, "soc-gpio13"), \
+ _pad(TEGRA_IO_PAD_SOC_GPIO10, 22, UINT_MAX, "soc-gpio10"), \
+ _pad(TEGRA_IO_PAD_UART4, 23, UINT_MAX, "uart4"), \
+ _pad(TEGRA_IO_PAD_UART5, 24, UINT_MAX, "uart5"), \
+ _pad(TEGRA_IO_PAD_DBG, 25, UINT_MAX, "dbg"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP3, 26, UINT_MAX, "hdmi-dp3"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP2, 27, UINT_MAX, "hdmi-dp2"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP0, 28, UINT_MAX, "hdmi-dp0"), \
+ _pad(TEGRA_IO_PAD_HDMI_DP1, 29, UINT_MAX, "hdmi-dp1"), \
+ _pad(TEGRA_IO_PAD_PEX_CNTRL, 32, UINT_MAX, "pex-cntrl"), \
+ _pad(TEGRA_IO_PAD_PEX_CTL2, 33, UINT_MAX, "pex-ctl2"), \
+ _pad(TEGRA_IO_PAD_PEX_L0_RST_N, 34, UINT_MAX, "pex-l0-rst"), \
+ _pad(TEGRA_IO_PAD_PEX_L1_RST_N, 35, UINT_MAX, "pex-l1-rst"), \
+ _pad(TEGRA_IO_PAD_SDMMC4, 36, UINT_MAX, "sdmmc4"), \
+ _pad(TEGRA_IO_PAD_PEX_L5_RST_N, 37, UINT_MAX, "pex-l5-rst"), \
+ _pad(TEGRA_IO_PAD_CAM, 38, UINT_MAX, "cam"), \
+ _pad(TEGRA_IO_PAD_CSIC, 43, UINT_MAX, "csic"), \
+ _pad(TEGRA_IO_PAD_CSID, 44, UINT_MAX, "csid"), \
+ _pad(TEGRA_IO_PAD_CSIE, 45, UINT_MAX, "csie"), \
+ _pad(TEGRA_IO_PAD_CSIF, 46, UINT_MAX, "csif"), \
+ _pad(TEGRA_IO_PAD_SPI, 47, UINT_MAX, "spi"), \
+ _pad(TEGRA_IO_PAD_UFS, 49, UINT_MAX, "ufs"), \
+ _pad(TEGRA_IO_PAD_CSIG, 50, UINT_MAX, "csig"), \
+ _pad(TEGRA_IO_PAD_CSIH, 51, UINT_MAX, "csih"), \
+ _pad(TEGRA_IO_PAD_EDP, 53, UINT_MAX, "edp"), \
+ _pad(TEGRA_IO_PAD_SDMMC1_HV, 55, 4, "sdmmc1-hv"), \
+ _pad(TEGRA_IO_PAD_SDMMC3_HV, 56, 6, "sdmmc3-hv"), \
+ _pad(TEGRA_IO_PAD_CONN, 60, UINT_MAX, "conn"), \
+ _pad(TEGRA_IO_PAD_AUDIO_HV, 61, 1, "audio-hv"), \
+ _pad(TEGRA_IO_PAD_AO_HV, UINT_MAX, 0, "ao-hv")
+
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
- { .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK_BIAS, .dpd = 4, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK3, .dpd = 5, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 7, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EQOS, .dpd = 8, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2_BIAS, .dpd = 9, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 10, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP3, .dpd = 11, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DAP5, .dpd = 12, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PWR_CTL, .dpd = 15, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO53, .dpd = 16, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM2, .dpd = 18, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_GP_PWM3, .dpd = 19, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO12, .dpd = 20, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO13, .dpd = 21, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SOC_GPIO10, .dpd = 22, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART4, .dpd = 23, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UART5, .dpd = 24, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP3, .dpd = 26, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP2, .dpd = 27, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP0, .dpd = 28, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_HDMI_DP1, .dpd = 29, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_CTL2, .dpd = 33, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L0_RST_N, .dpd = 34, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L1_RST_N, .dpd = 35, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC4, .dpd = 36, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_PEX_L5_RST_N, .dpd = 37, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIC, .dpd = 43, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSID, .dpd = 44, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIE, .dpd = 45, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIF, .dpd = 46, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SPI, .dpd = 47, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_UFS, .dpd = 49, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIG, .dpd = 50, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CSIH, .dpd = 51, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_EDP, .dpd = 53, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC1_HV, .dpd = 55, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_SDMMC3_HV, .dpd = 56, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_CONN, .dpd = 60, .voltage = UINT_MAX },
- { .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = UINT_MAX },
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PAD)
+};
+
+static const struct pinctrl_pin_desc tegra194_pin_descs[] = {
+ TEGRA194_IO_PAD_TABLE(TEGRA_IO_PIN_DESC)
+};
+
+static const struct tegra_pmc_regs tegra194_pmc_regs = {
+ .scratch0 = 0x2000,
+ .dpd_req = 0x74,
+ .dpd_status = 0x78,
+ .dpd2_req = 0x7c,
+ .dpd2_status = 0x80,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0x7c,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra194_reset_sources[] = {
+ "SYS_RESET_N",
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ "CSITE",
+ "RCEWDT",
+ "PVA0WDT",
+ "PVA1WDT",
+ "L1A_ASYNC",
+ "BPMPBOOT",
+ "FUSECRC",
};
static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -2777,18 +3387,97 @@
.has_tsense_reset = false,
.has_gpu_clamps = false,
.needs_mbist_war = false,
- .has_impl_33v_pwr = false,
+ .has_impl_33v_pwr = true,
.maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
- .regs = &tegra186_pmc_regs,
+ .num_pin_descs = ARRAY_SIZE(tegra194_pin_descs),
+ .pin_descs = tegra194_pin_descs,
+ .regs = &tegra194_pmc_regs,
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra194_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra194_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra194_wake_events),
.wake_events = tegra194_wake_events,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
+};
+
+static const struct tegra_pmc_regs tegra234_pmc_regs = {
+ .scratch0 = 0x2000,
+ .dpd_req = 0,
+ .dpd_status = 0,
+ .dpd2_req = 0,
+ .dpd2_status = 0,
+ .rst_status = 0x70,
+ .rst_source_shift = 0x2,
+ .rst_source_mask = 0xfc,
+ .rst_level_shift = 0x0,
+ .rst_level_mask = 0x3,
+};
+
+static const char * const tegra234_reset_sources[] = {
+ "SYS_RESET_N",
+ "AOWDT",
+ "BCCPLEXWDT",
+ "BPMPWDT",
+ "SCEWDT",
+ "SPEWDT",
+ "APEWDT",
+ "LCCPLEXWDT",
+ "SENSOR",
+ "AOTAG",
+ "VFSENSOR",
+ "MAINSWRST",
+ "SC7",
+ "HSM",
+ "CSITE",
+ "RCEWDT",
+ "PVA0WDT",
+ "PVA1WDT",
+ "L1A_ASYNC",
+ "BPMPBOOT",
+ "FUSECRC",
+};
+
+static const struct tegra_pmc_soc tegra234_pmc_soc = {
+ .num_powergates = 0,
+ .powergates = NULL,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+ .has_tsense_reset = false,
+ .has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
+ .num_io_pads = 0,
+ .io_pads = NULL,
+ .num_pin_descs = 0,
+ .pin_descs = NULL,
+ .regs = &tegra234_pmc_regs,
+ .init = NULL,
+ .setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
+ .irq_set_wake = tegra186_pmc_irq_set_wake,
+ .irq_set_type = tegra186_pmc_irq_set_type,
+ .reset_sources = tegra234_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra234_reset_sources),
+ .reset_levels = tegra186_reset_levels,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
+ .num_wake_events = 0,
+ .wake_events = NULL,
+ .pmc_clks_data = NULL,
+ .num_pmc_clks = 0,
+ .has_blink_output = false,
};
static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra234-pmc", .data = &tegra234_pmc_soc },
{ .compatible = "nvidia,tegra194-pmc", .data = &tegra194_pmc_soc },
{ .compatible = "nvidia,tegra186-pmc", .data = &tegra186_pmc_soc },
{ .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
@@ -2892,7 +3581,7 @@
}
}
- pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ pmc->base = ioremap(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
of_node_put(np);
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
new file mode 100644
index 0000000..367a71a
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra20
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ struct regulator_dev *rtc_rdev;
+ int core_min_uV;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra20_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the most common 1.2V or to
+ * whatever maximum value is defined via the board's device-tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
+static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ struct coupling_desc *c_desc = &core_rdev->coupling_desc;
+ struct regulator_dev *rdev;
+ int max_spread;
+ unsigned int i;
+
+ for (i = 1; i < c_desc->n_coupled; i++) {
+ max_spread = core_rdev->constraints->max_spread[i - 1];
+ rdev = c_desc->coupled_rdevs[i];
+
+ if (rdev == rtc_rdev && max_spread)
+ return max_spread;
+ }
+
+ pr_err_once("rtc-core max-spread is undefined in device-tree\n");
+
+ return 150000;
+}
+
+static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev,
+ int cpu_uV, int cpu_min_uV)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int rtc_min_uV, rtc_max_uV = INT_MAX;
+ int core_target_uV;
+ int rtc_target_uV;
+ int max_spread;
+ int core_uV;
+ int rtc_uV;
+ int err;
+
+ /*
+ * RTC and CORE voltages should be no more than 170mV apart from each
+ * other; the CPU should be below both RTC and CORE by at least 120mV.
+ * This applies to all Tegra20 SoCs.
+ */
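+ /*
+ * Editor's illustration with hypothetical numbers: if CORE sits at
+ * 1.2V, the CPU rail must be at or below 1.2V - 120mV = 1.08V, and
+ * RTC must stay within 170mV of CORE. The loop below therefore moves
+ * CORE and RTC in alternating steps of at most max_spread so the
+ * spread constraint is never violated mid-transition.
+ */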
+ max_spread = tegra20_core_rtc_max_spread(core_rdev, rtc_rdev);
+
+ /*
+ * The core voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum core voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra20_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ core_min_uV = max(cpu_min_uV + 125000, core_min_uV);
+ if (core_min_uV > core_max_uV)
+ return -EINVAL;
+
+ if (cpu_uV + 120000 > core_uV)
+ pr_err("core-cpu voltage constraint violated: %d %d\n",
+ core_uV, cpu_uV + 120000);
+
+ rtc_uV = regulator_get_voltage_rdev(rtc_rdev);
+ if (rtc_uV < 0)
+ return rtc_uV;
+
+ if (cpu_uV + 120000 > rtc_uV)
+ pr_err("rtc-cpu voltage constraint violated: %d %d\n",
+ rtc_uV, cpu_uV + 120000);
+
+ if (abs(core_uV - rtc_uV) > 170000)
+ pr_err("core-rtc voltage constraint violated: %d %d\n",
+ core_uV, rtc_uV);
+
+ rtc_min_uV = max(cpu_min_uV + 125000, core_min_uV - max_spread);
+
+ err = regulator_check_voltage(rtc_rdev, &rtc_min_uV, &rtc_max_uV);
+ if (err)
+ return err;
+
+ while (core_uV != core_min_uV || rtc_uV != rtc_min_uV) {
+ if (core_uV < core_min_uV) {
+ core_target_uV = min(core_uV + max_spread, core_min_uV);
+ core_target_uV = min(rtc_uV + max_spread, core_target_uV);
+ } else {
+ core_target_uV = max(core_uV - max_spread, core_min_uV);
+ core_target_uV = max(rtc_uV - max_spread, core_target_uV);
+ }
+
+ if (core_uV == core_target_uV)
+ goto update_rtc;
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+update_rtc:
+ if (rtc_uV < rtc_min_uV) {
+ rtc_target_uV = min(rtc_uV + max_spread, rtc_min_uV);
+ rtc_target_uV = min(core_uV + max_spread, rtc_target_uV);
+ } else {
+ rtc_target_uV = max(rtc_uV - max_spread, rtc_min_uV);
+ rtc_target_uV = max(core_uV - max_spread, rtc_target_uV);
+ }
+
+ if (rtc_uV == rtc_target_uV)
+ continue;
+
+ err = regulator_set_voltage_rdev(rtc_rdev,
+ rtc_target_uV,
+ rtc_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ rtc_uV = rtc_target_uV;
+ }
+
+ return 0;
+}
+
+static int tegra20_core_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_uV;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ return tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_uV);
+}
+
+static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev,
+ struct regulator_dev *rtc_rdev)
+{
+ int cpu_min_uV_consumers = 0;
+ int cpu_max_uV = INT_MAX;
+ int cpu_min_uV = 0;
+ int cpu_uV;
+ int err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers, in which case the
+ * voltage must not be changed, because the CPU simply won't survive
+ * the voltage drop if it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = cpu_uV;
+
+ if (cpu_min_uV > cpu_uV) {
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+ } else if (cpu_min_uV < cpu_uV) {
+ err = regulator_set_voltage_rdev(cpu_rdev, cpu_min_uV,
+ cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
+ cpu_uV, cpu_min_uV);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+ struct regulator_dev *rtc_rdev = tegra->rtc_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev && rtc_rdev != rdev) ||
+ state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ if (rdev == cpu_rdev)
+ return tegra20_cpu_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ if (rdev == core_rdev)
+ return tegra20_core_voltage_update(tegra, cpu_rdev,
+ core_rdev, rtc_rdev);
+
+ pr_err("changing %s voltage not permitted\n", rdev_get_name(rtc_rdev));
+
+ return -EPERM;
+}
+
+static int tegra20_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-rtc-regulator") &&
+ !tegra->rtc_rdev) {
+ tegra->rtc_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra20_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->rtc_rdev == rdev) {
+ tegra->rtc_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra20_coupler = {
+ .coupler = {
+ .attach_regulator = tegra20_regulator_attach,
+ .detach_regulator = tegra20_regulator_detach,
+ .balance_voltage = tegra20_regulator_balance_voltage,
+ },
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("nvidia,tegra20"))
+ return 0;
+
+ return regulator_coupler_register(&tegra20_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
new file mode 100644
index 0000000..0e776b2
--- /dev/null
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Voltage regulators coupler for NVIDIA Tegra30
+ * Copyright (C) 2019 GRATE-DRIVER project
+ *
+ * Voltage constraints borrowed from downstream kernel sources
+ * Copyright (C) 2010-2011 NVIDIA Corporation
+ */
+
+#define pr_fmt(fmt) "tegra voltage-coupler: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/regulator/coupler.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <soc/tegra/fuse.h>
+
+struct tegra_regulator_coupler {
+ struct regulator_coupler coupler;
+ struct regulator_dev *core_rdev;
+ struct regulator_dev *cpu_rdev;
+ int core_min_uV;
+};
+
+static inline struct tegra_regulator_coupler *
+to_tegra_coupler(struct regulator_coupler *coupler)
+{
+ return container_of(coupler, struct tegra_regulator_coupler, coupler);
+}
+
+static int tegra30_core_limit(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV = 0;
+ int core_max_uV;
+ int core_cur_uV;
+ int err;
+
+ if (tegra->core_min_uV > 0)
+ return tegra->core_min_uV;
+
+ core_cur_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_cur_uV < 0)
+ return core_cur_uV;
+
+ core_max_uV = max(core_cur_uV, 1200000);
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ /*
+ * Limit the minimum CORE voltage to the value left by the bootloader
+ * or, if that value is unreasonably low, to the most common 1.2V or to
+ * whatever maximum value is defined via the board's device-tree.
+ */
+ tegra->core_min_uV = core_max_uV;
+
+ pr_info("core minimum voltage limited to %duV\n", tegra->core_min_uV);
+
+ return tegra->core_min_uV;
+}
+
+static int tegra30_core_cpu_limit(int cpu_uV)
+{
+ if (cpu_uV < 800000)
+ return 950000;
+
+ if (cpu_uV < 900000)
+ return 1000000;
+
+ if (cpu_uV < 1000000)
+ return 1100000;
+
+ if (cpu_uV < 1100000)
+ return 1200000;
+
+ if (cpu_uV < 1250000) {
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 0 ... 1:
+ case 4:
+ case 7 ... 8:
+ return 1200000;
+
+ default:
+ return 1300000;
+ }
+ }
+
+ return -EINVAL;
+}
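+/*
+ * Editor's note - a worked example of the brackets above (values are
+ * illustrative): a CPU voltage of 950000uV falls into the "< 1000000"
+ * bracket, so the CORE rail must be at least 1100000uV; 1250000uV and
+ * above matches no bracket and yields -EINVAL.
+ */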
+
+static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
+ struct regulator_dev *cpu_rdev,
+ struct regulator_dev *core_rdev)
+{
+ int core_min_uV, core_max_uV = INT_MAX;
+ int cpu_min_uV, cpu_max_uV = INT_MAX;
+ int cpu_min_uV_consumers = 0;
+ int core_min_limited_uV;
+ int core_target_uV;
+ int cpu_target_uV;
+ int core_max_step;
+ int cpu_max_step;
+ int max_spread;
+ int core_uV;
+ int cpu_uV;
+ int err;
+
+ /*
+ * The CPU voltage should not drop more than 300mV below the CORE
+ * voltage, and should stay below the CORE by 100mV+, depending on the
+ * CORE voltage. This applies to all Tegra30 SoCs.
+ */
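+ /*
+ * Editor's illustration with hypothetical numbers: raising the CPU
+ * rail from 1.0V to 1.3V with cpu_max_step = 150000uV takes two
+ * passes of the loop below (1.0V -> 1.15V -> 1.3V), and the CORE
+ * minimum is re-derived via tegra30_core_cpu_limit() after every
+ * CPU step.
+ */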
+ max_spread = cpu_rdev->constraints->max_spread[0];
+ cpu_max_step = cpu_rdev->constraints->max_uV_step;
+ core_max_step = core_rdev->constraints->max_uV_step;
+
+ if (!max_spread) {
+ pr_err_once("cpu-core max-spread is undefined in device-tree\n");
+ max_spread = 300000;
+ }
+
+ if (!cpu_max_step) {
+ pr_err_once("cpu max-step is undefined in device-tree\n");
+ cpu_max_step = 150000;
+ }
+
+ if (!core_max_step) {
+ pr_err_once("core max-step is undefined in device-tree\n");
+ core_max_step = 150000;
+ }
+
+ /*
+ * The CORE voltage scaling is currently not hooked up in drivers,
+ * hence we will limit the minimum CORE voltage to a reasonable value.
+ * This should be good enough for the time being.
+ */
+ core_min_uV = tegra30_core_limit(tegra, core_rdev);
+ if (core_min_uV < 0)
+ return core_min_uV;
+
+ err = regulator_check_consumers(core_rdev, &core_min_uV, &core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = regulator_get_voltage_rdev(core_rdev);
+ if (core_uV < 0)
+ return core_uV;
+
+ cpu_min_uV = core_min_uV - max_spread;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV, &cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_consumers(cpu_rdev, &cpu_min_uV_consumers,
+ &cpu_max_uV, PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ err = regulator_check_voltage(cpu_rdev, &cpu_min_uV, &cpu_max_uV);
+ if (err)
+ return err;
+
+ cpu_uV = regulator_get_voltage_rdev(cpu_rdev);
+ if (cpu_uV < 0)
+ return cpu_uV;
+
+ /*
+ * The CPU's regulator may not have any consumers, in which case the
+ * voltage must not be changed, because the CPU simply won't survive
+ * the voltage drop if it's running at a higher frequency.
+ */
+ if (!cpu_min_uV_consumers)
+ cpu_min_uV = max(cpu_uV, cpu_min_uV);
+
+ /*
+ * The bootloader should have set up the voltages correctly, but if a
+ * violation is found, try to fix it first.
+ */
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_min_uV = max(core_min_uV, tegra30_core_cpu_limit(cpu_min_uV));
+
+ err = regulator_check_voltage(core_rdev, &core_min_uV, &core_max_uV);
+ if (err)
+ return err;
+
+ if (core_min_limited_uV > core_uV) {
+ pr_err("core voltage constraint violated: %d %d %d\n",
+ core_uV, core_min_limited_uV, cpu_uV);
+ goto update_core;
+ }
+
+ while (cpu_uV != cpu_min_uV || core_uV != core_min_uV) {
+ if (cpu_uV < cpu_min_uV) {
+ cpu_target_uV = min(cpu_uV + cpu_max_step, cpu_min_uV);
+ } else {
+ cpu_target_uV = max(cpu_uV - cpu_max_step, cpu_min_uV);
+ cpu_target_uV = max(core_uV - max_spread, cpu_target_uV);
+ }
+
+ if (cpu_uV == cpu_target_uV)
+ goto update_core;
+
+ err = regulator_set_voltage_rdev(cpu_rdev,
+ cpu_target_uV,
+ cpu_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ cpu_uV = cpu_target_uV;
+update_core:
+ core_min_limited_uV = tegra30_core_cpu_limit(cpu_uV);
+ if (core_min_limited_uV < 0)
+ return core_min_limited_uV;
+
+ core_target_uV = max(core_min_limited_uV, core_min_uV);
+
+ if (core_uV < core_target_uV) {
+ core_target_uV = min(core_target_uV, core_uV + core_max_step);
+ core_target_uV = min(core_target_uV, cpu_uV + max_spread);
+ } else {
+ core_target_uV = max(core_target_uV, core_uV - core_max_step);
+ }
+
+ if (core_uV == core_target_uV)
+ continue;
+
+ err = regulator_set_voltage_rdev(core_rdev,
+ core_target_uV,
+ core_max_uV,
+ PM_SUSPEND_ON);
+ if (err)
+ return err;
+
+ core_uV = core_target_uV;
+ }
+
+ return 0;
+}
+
+static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct regulator_dev *core_rdev = tegra->core_rdev;
+ struct regulator_dev *cpu_rdev = tegra->cpu_rdev;
+
+ if ((core_rdev != rdev && cpu_rdev != rdev) || state != PM_SUSPEND_ON) {
+ pr_err("regulators are not coupled properly\n");
+ return -EINVAL;
+ }
+
+ return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
+}
+
+static int tegra30_regulator_attach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+ struct device_node *np = rdev->dev.of_node;
+
+ if (of_property_read_bool(np, "nvidia,tegra-core-regulator") &&
+ !tegra->core_rdev) {
+ tegra->core_rdev = rdev;
+ return 0;
+ }
+
+ if (of_property_read_bool(np, "nvidia,tegra-cpu-regulator") &&
+ !tegra->cpu_rdev) {
+ tegra->cpu_rdev = rdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tegra30_regulator_detach(struct regulator_coupler *coupler,
+ struct regulator_dev *rdev)
+{
+ struct tegra_regulator_coupler *tegra = to_tegra_coupler(coupler);
+
+ if (tegra->core_rdev == rdev) {
+ tegra->core_rdev = NULL;
+ return 0;
+ }
+
+ if (tegra->cpu_rdev == rdev) {
+ tegra->cpu_rdev = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct tegra_regulator_coupler tegra30_coupler = {
+ .coupler = {
+ .attach_regulator = tegra30_regulator_attach,
+ .detach_regulator = tegra30_regulator_detach,
+ .balance_voltage = tegra30_regulator_balance_voltage,
+ },
+};
+
+static int __init tegra_regulator_coupler_init(void)
+{
+ if (!of_machine_is_compatible("nvidia,tegra30"))
+ return 0;
+
+ return regulator_coupler_register(&tegra30_coupler.coupler);
+}
+arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index cf545f4..f5b82ff 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -80,6 +80,38 @@
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_K3_RINGACC
+ bool "K3 Ring accelerator Sub System"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_INTA_IRQCHIP
+ help
+ Say y here to support the K3 Ring accelerator module.
+ The Ring Accelerator (RINGACC or RA) provides hardware acceleration
+ to enable straightforward passing of work between a producer
+ and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs.
+ If unsure, say N.
+
+config TI_K3_SOCINFO
+ bool
+ depends on ARCH_K3 || COMPILE_TEST
+ select SOC_BUS
+ select MFD_SYSCON
+ help
+ Include support for the SoC bus socinfo for the TI K3 Multicore SoC
+ platforms to provide information about the SoC family and
+ variant to user space.
+
+config TI_PRUSS
+ tristate "TI PRU-ICSS Subsystem Platform drivers"
+ depends on SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ select MFD_SYSCON
+ help
+ TI PRU-ICSS Subsystem platform specific support.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index b3868d3..cc3c972 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -6,6 +6,11 @@
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
obj-$(CONFIG_AMX3_PM) += pm33xx.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
+obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
+obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
+obj-$(CONFIG_TI_PRUSS) += pruss.o
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
new file mode 100644
index 0000000..1147dc4
--- /dev/null
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -0,0 +1,1254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 NAVSS Ring Accelerator subsystem driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+static LIST_HEAD(k3_ringacc_list);
+static DEFINE_MUTEX(k3_ringacc_list_lock);
+
+#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+
+/**
+ * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
+ *
+ * @resv_16: Reserved
+ * @db: Ring Doorbell Register
+ * @resv_4: Reserved
+ * @occ: Ring Occupancy Register
+ * @indx: Ring Current Index Register
+ * @hwocc: Ring Hardware Occupancy Register
+ * @hwindx: Ring Hardware Current Index Register
+ */
+struct k3_ring_rt_regs {
+ u32 resv_16[4];
+ u32 db;
+ u32 resv_4[1];
+ u32 occ;
+ u32 indx;
+ u32 hwocc;
+ u32 hwindx;
+};
+
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+
+/**
+ * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
+ *
+ * @head_data: Ring Head Entry Data Registers
+ * @tail_data: Ring Tail Entry Data Registers
+ * @peek_head_data: Ring Peek Head Entry Data Regs
+ * @peek_tail_data: Ring Peek Tail Entry Data Regs
+ */
+struct k3_ring_fifo_regs {
+ u32 head_data[128];
+ u32 tail_data[128];
+ u32 peek_head_data[128];
+ u32 peek_tail_data[128];
+};
+
+/**
+ * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
+ *
+ * @revision: Revision Register
+ * @config: Config Register
+ */
+struct k3_ringacc_proxy_gcfg_regs {
+ u32 revision;
+ u32 config;
+};
+
+#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0)
+
+/**
+ * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
+ *
+ * @control: Proxy Control Register
+ * @status: Proxy Status Register
+ * @resv_512: Reserved
+ * @data: Proxy Data Register
+ */
+struct k3_ringacc_proxy_target_regs {
+ u32 control;
+ u32 status;
+ u8 resv_512[504];
+ u32 data[128];
+};
+
+#define K3_RINGACC_PROXY_TARGET_STEP 0x1000
+#define K3_RINGACC_PROXY_NOT_USED (-1)
+
+enum k3_ringacc_proxy_access_mode {
+ PROXY_ACCESS_MODE_HEAD = 0,
+ PROXY_ACCESS_MODE_TAIL = 1,
+ PROXY_ACCESS_MODE_PEEK_HEAD = 2,
+ PROXY_ACCESS_MODE_PEEK_TAIL = 3,
+};
+
+#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U)
+#define K3_RINGACC_FIFO_REGS_STEP 0x1000
+#define K3_RINGACC_MAX_DB_RING_CNT (127U)
+
+struct k3_ring_ops {
+ int (*push_tail)(struct k3_ring *ring, void *elm);
+ int (*push_head)(struct k3_ring *ring, void *elm);
+ int (*pop_tail)(struct k3_ring *ring, void *elm);
+ int (*pop_head)(struct k3_ring *ring, void *elm);
+};
+
+/**
+ * struct k3_ring_state - Internal state tracking structure
+ *
+ * @free: Number of free entries
+ * @occ: Occupancy
+ * @windex: Write index
+ * @rindex: Read index
+ */
+struct k3_ring_state {
+ u32 free;
+ u32 occ;
+ u32 windex;
+ u32 rindex;
+};
+
+/**
+ * struct k3_ring - RA Ring descriptor
+ *
+ * @rt: Ring control/status registers
+ * @fifos: Ring queues registers
+ * @proxy: Ring Proxy Datapath registers
+ * @ring_mem_dma: Ring buffer dma address
+ * @ring_mem_virt: Ring buffer virt address
+ * @ops: Ring operations
+ * @size: Ring size in elements
+ * @elm_size: Size of the ring element
+ * @mode: Ring mode
+ * @flags: flags
+ * @ring_id: Ring Id
+ * @parent: Pointer to struct @k3_ringacc
+ * @use_count: Use count for shared rings
+ * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ */
+struct k3_ring {
+ struct k3_ring_rt_regs __iomem *rt;
+ struct k3_ring_fifo_regs __iomem *fifos;
+ struct k3_ringacc_proxy_target_regs __iomem *proxy;
+ dma_addr_t ring_mem_dma;
+ void *ring_mem_virt;
+ struct k3_ring_ops *ops;
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+ u32 flags;
+#define K3_RING_FLAG_BUSY BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+ struct k3_ring_state state;
+ u32 ring_id;
+ struct k3_ringacc *parent;
+ u32 use_count;
+ int proxy_id;
+};
+
+struct k3_ringacc_ops {
+ int (*init)(struct platform_device *pdev, struct k3_ringacc *ringacc);
+};
+
+/**
+ * struct k3_ringacc - Rings accelerator descriptor
+ *
+ * @dev: pointer to RA device
+ * @proxy_gcfg: RA proxy global config registers
+ * @proxy_target_base: RA proxy datapath region
+ * @num_rings: number of rings in RA
+ * @rings_inuse: bitfield for ring usage tracking
+ * @rm_gp_range: general purpose rings range from tisci
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ * @num_proxies: number of RA proxies
+ * @proxy_inuse: bitfield for proxy usage tracking
+ * @rings: array of rings descriptors (struct @k3_ring)
+ * @list: list of RAs in the system
+ * @req_lock: protect rings allocation
+ * @tisci: pointer to the ti-sci handle
+ * @tisci_ring_ops: ti-sci rings ops
+ * @tisci_dev_id: ti-sci device id
+ * @ops: SoC specific ringacc operation
+ */
+struct k3_ringacc {
+ struct device *dev;
+ struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
+ void __iomem *proxy_target_base;
+ u32 num_rings; /* number of rings in Ringacc module */
+ unsigned long *rings_inuse;
+ struct ti_sci_resource *rm_gp_range;
+
+ bool dma_ring_reset_quirk;
+ u32 num_proxies;
+ unsigned long *proxy_inuse;
+
+ struct k3_ring *rings;
+ struct list_head list;
+ struct mutex req_lock; /* protect rings allocation */
+
+ const struct ti_sci_handle *tisci;
+ const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
+ u32 tisci_dev_id;
+
+ const struct k3_ringacc_ops *ops;
+};
+
+/**
+ * struct k3_ringacc_soc_data - Rings accelerator SoC data
+ *
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ */
+struct k3_ringacc_soc_data {
+ unsigned dma_ring_reset_quirk:1;
+};
+
+static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
+{
+ return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
+ (4 << ring->elm_size);
+}
+
+static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
+{
+ return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
+}
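+/*
+ * Editor's note: as used throughout this file, elm_size is a log2
+ * encoding of the element size in 4-byte units, so (4 << elm_size)
+ * gives the size in bytes: 0 -> 4 B, 1 -> 8 B, 2 -> 16 B, and so on
+ * up to K3_RINGACC_RING_ELSIZE_256.
+ */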
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_ring_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_ringacc_ring_pop_mem,
+};
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_msg_ops = {
+ .push_tail = k3_ringacc_ring_push_io,
+ .push_head = k3_ringacc_ring_push_head_io,
+ .pop_tail = k3_ringacc_ring_pop_tail_io,
+ .pop_head = k3_ringacc_ring_pop_io,
+};
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);
+
+static struct k3_ring_ops k3_ring_mode_proxy_ops = {
+ .push_tail = k3_ringacc_ring_push_tail_proxy,
+ .push_head = k3_ringacc_ring_push_head_proxy,
+ .pop_tail = k3_ringacc_ring_pop_tail_proxy,
+ .pop_head = k3_ringacc_ring_pop_head_proxy,
+};
+
+static void k3_ringacc_ring_dump(struct k3_ring *ring)
+{
+ struct device *dev = ring->parent->dev;
+
+ dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
+ dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
+ &ring->ring_mem_dma);
+ dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
+ ring->elm_size, ring->size, ring->mode, ring->proxy_id);
+ dev_dbg(dev, "dump flags %08X\n", ring->flags);
+
+ dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
+ dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
+ dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
+ dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
+ dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));
+
+ if (ring->ring_mem_virt)
+ print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
+ 16, 1, ring->ring_mem_virt, 16 * 8, false);
+}
+
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags)
+{
+ int proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (id == K3_RINGACC_RING_ID_ANY) {
+ /* Request for any general purpose ring */
+ struct ti_sci_resource_desc *gp_rings =
+ &ringacc->rm_gp_range->desc[0];
+ unsigned long size;
+
+ size = gp_rings->start + gp_rings->num;
+ id = find_next_zero_bit(ringacc->rings_inuse, size,
+ gp_rings->start);
+ if (id == size)
+ goto error;
+ } else if (id < 0) {
+ goto error;
+ }
+
+ if (test_bit(id, ringacc->rings_inuse) &&
+ !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
+ goto error;
+ else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
+ goto out;
+
+ if (flags & K3_RINGACC_RING_USE_PROXY) {
+ proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies, 0);
+ if (proxy_id == ringacc->num_proxies)
+ goto error;
+ }
+
+ if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ set_bit(proxy_id, ringacc->proxy_inuse);
+ ringacc->rings[id].proxy_id = proxy_id;
+ dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
+ proxy_id);
+ } else {
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
+ }
+
+ set_bit(id, ringacc->rings_inuse);
+out:
+ ringacc->rings[id].use_count++;
+ mutex_unlock(&ringacc->req_lock);
+ return &ringacc->rings[id];
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ if (!fwd_ring || !compl_ring)
+ return -EINVAL;
+
+ *fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
+ if (!(*fwd_ring))
+ return -ENODEV;
+
+ *compl_ring = k3_ringacc_request_ring(ringacc, compl_id, 0);
+ if (!(*compl_ring)) {
+ k3_ringacc_ring_free(*fwd_ring);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
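+/*
+ * Editor's illustration - a minimal, hypothetical client sequence
+ * built only from APIs exported by this file (error handling elided,
+ * cfg values are examples rather than requirements):
+ *
+ *	struct k3_ring_cfg cfg = {
+ *		.size = 128,
+ *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
+ *		.mode = K3_RINGACC_RING_MODE_RING,
+ *		.flags = 0,
+ *	};
+ *	struct k3_ring *ring;
+ *	u64 elem = 0;
+ *
+ *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
+ *	k3_ringacc_ring_cfg(ring, &cfg);
+ *	k3_ringacc_ring_push(ring, &elem);
+ *	k3_ringacc_ring_pop(ring, &elem);
+ *	k3_ringacc_ring_free(ring);
+ */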
+
+static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ ring->size,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ k3_ringacc_ring_reset_sci(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
+
+static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
+ enum k3_ring_mode mode)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ mode,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return;
+
+ if (!ring->parent->dma_ring_reset_quirk)
+ goto reset;
+
+ if (!occ)
+ occ = readl(&ring->rt->occ);
+
+ if (occ) {
+ u32 db_ring_cnt, db_ring_cnt_cur;
+
+ dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
+ ring->ring_id, occ);
+ /* TI-SCI ring reset */
+ k3_ringacc_ring_reset_sci(ring);
+
+ /*
+ * Setup the ring in ring/doorbell mode (if not already in this
+ * mode)
+ */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(
+ ring, K3_RINGACC_RING_MODE_RING);
+ /*
+ * Ring the doorbell 2**22 - ringOcc times.
+ * This will wrap the internal UDMAP ring state occupancy
+ * counter (which is 21-bits wide) to 0.
+ */
+ db_ring_cnt = (1U << 22) - occ;
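+ /*
+ * Editor's illustration: with occ == 100, db_ring_cnt is
+ * 2^22 - 100 = 4194204; adding that to the current occupancy of 100
+ * totals 2^22, a multiple of 2^21, so the 21-bit counter lands
+ * exactly on 0.
+ */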
+
+ while (db_ring_cnt != 0) {
+ /*
+ * Ring the doorbell with the maximum count each
+ * iteration if possible to minimize the total number
+ * of writes.
+ */
+ if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
+ db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
+ else
+ db_ring_cnt_cur = db_ring_cnt;
+
+ writel(db_ring_cnt_cur, &ring->rt->db);
+ db_ring_cnt -= db_ring_cnt_cur;
+ }
+
+ /* Restore the original ring mode (if not ring mode) */
+ if (ring->mode != K3_RINGACC_RING_MODE_RING)
+ k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
+ }
+
+reset:
+ /* Reset the ring */
+ k3_ringacc_ring_reset(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
+
+static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ int ret;
+
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring->ring_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
+ ret, ring->ring_id);
+}
+
+int k3_ringacc_ring_free(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc;
+
+ if (!ring)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (--ring->use_count)
+ goto out;
+
+ if (!(ring->flags & K3_RING_FLAG_BUSY))
+ goto no_init;
+
+ k3_ringacc_ring_free_sci(ring);
+
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt, ring->ring_mem_dma);
+ ring->flags = 0;
+ ring->ops = NULL;
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
+ clear_bit(ring->proxy_id, ringacc->proxy_inuse);
+ ring->proxy = NULL;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+no_init:
+ clear_bit(ring->ring_id, ringacc->rings_inuse);
+
+out:
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);
+
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->ring_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
+{
+ if (!ring)
+ return -EINVAL;
+
+ return ring->parent->tisci_dev_id;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);
+
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
+{
+ int irq_num;
+
+ if (!ring)
+ return -EINVAL;
+
+ irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ if (irq_num <= 0)
+ irq_num = -EINVAL;
+ return irq_num;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
+
+static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
+{
+ struct k3_ringacc *ringacc = ring->parent;
+ u32 ring_idx;
+ int ret;
+
+ if (!ringacc->tisci)
+ return -EINVAL;
+
+ ring_idx = ring->ring_id;
+ ret = ringacc->tisci_ring_ops->config(
+ ringacc->tisci,
+ TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
+ ringacc->tisci_dev_id,
+ ring_idx,
+ lower_32_bits(ring->ring_mem_dma),
+ upper_32_bits(ring->ring_mem_dma),
+ ring->size,
+ ring->mode,
+ ring->elm_size,
+ 0);
+ if (ret)
+ dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
+ ret, ring_idx);
+
+ return ret;
+}
+
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ int ret = 0;
+
+ if (!ring || !cfg)
+ return -EINVAL;
+ ringacc = ring->parent;
+
+ if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
+ cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
+ cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
+ !test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
+ ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
+ cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
+ dev_err(ringacc->dev,
+ "Message mode must use proxy for %u element size\n",
+ 4 << ring->elm_size);
+ return -EINVAL;
+ }
+
+ /*
+ * In case of a shared ring, only the first user (the master user) can
+ * configure the ring. The sequence to be followed by the client is:
+ * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
+ * k3_ringacc_ring_cfg(ring, cfg); # master configuration
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
+ */
+ if (ring->use_count != 1)
+ return 0;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
+ ring->proxy = ringacc->proxy_target_base +
+ ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
+
+ switch (ring->mode) {
+ case K3_RINGACC_RING_MODE_RING:
+ ring->ops = &k3_ring_mode_ring_ops;
+ break;
+ case K3_RINGACC_RING_MODE_MESSAGE:
+ if (ring->proxy)
+ ring->ops = &k3_ring_mode_proxy_ops;
+ else
+ ring->ops = &k3_ring_mode_msg_ops;
+ break;
+ default:
+ ring->ops = NULL;
+ ret = -EINVAL;
+ goto err_free_proxy;
+ }
+
+ ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+ ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
+ K3_RING_FLAG_SHARED : 0;
+
+ k3_ringacc_ring_dump(ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ringacc->dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+err_free_proxy:
+ ring->proxy = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
+
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return ring->size;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);
+
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.free)
+ ring->state.free = ring->size - readl(&ring->rt->occ);
+
+ return ring->state.free;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);
+
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
+{
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ return readl(&ring->rt->occ);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
+
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
+{
+ return !k3_ringacc_ring_get_free(ring);
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
+
+enum k3_ringacc_access_mode {
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL,
+ K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
+ K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
+};
+
+#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16)
+#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24)
+static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
+ enum k3_ringacc_proxy_access_mode mode)
+{
+ u32 val;
+
+ val = ring->ring_id;
+ val |= K3_RINGACC_PROXY_MODE(mode);
+ val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
+ writel(val, &ring->proxy->control);
+ return 0;
+}
+
+static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ ptr = (void __iomem *)&ring->proxy->data;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->state.free,
+ ring->state.occ);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_proxy(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
+ enum k3_ringacc_access_mode access_mode)
+{
+ void __iomem *ptr;
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ ptr = (void __iomem *)&ring->fifos->head_data;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ ptr = (void __iomem *)&ring->fifos->tail_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ptr += k3_ringacc_ring_get_fifo_pos(ring);
+
+ switch (access_mode) {
+ case K3_RINGACC_ACCESS_MODE_POP_HEAD:
+ case K3_RINGACC_ACCESS_MODE_POP_TAIL:
+ dev_dbg(ring->parent->dev,
+ "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_fromio(elem, ptr, (4 << ring->elm_size));
+ ring->state.occ--;
+ break;
+ case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
+ case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
+ dev_dbg(ring->parent->dev,
+ "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
+ access_mode);
+ memcpy_toio(ptr, elem, (4 << ring->elm_size));
+ ring->state.free--;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n",
+ ring->state.free, ring->state.windex, ring->state.occ,
+ ring->state.rindex);
+ return 0;
+}
+
+static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
+}
+
+static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
+}
+
+static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_HEAD);
+}
+
+static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
+{
+ return k3_ringacc_ring_access_io(ring, elem,
+ K3_RINGACC_ACCESS_MODE_POP_TAIL);
+}
+
+static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
+
+ memcpy(elem_ptr, elem, (4 << ring->elm_size));
+
+ ring->state.windex = (ring->state.windex + 1) % ring->size;
+ ring->state.free--;
+ writel(1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ return 0;
+}
+
+static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
+ ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
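In this RING ("mem") mode the driver copies elements itself and only reports occupancy deltas to the hardware: the doorbell is written with 1 after a push and -1 after a pop, while windex/rindex advance modulo the ring size. A small index-arithmetic sketch, no hardware access, values illustrative:

static void example_index_wrap(void)
{
	unsigned int size = 4;			/* illustrative ring size */
	unsigned int windex = 3, rindex = 3;

	windex = (windex + 1) % size;		/* push in slot 3, wraps to 0 */
	rindex = (rindex + 1) % size;		/* pop from slot 3, wraps to 0 */
}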
+
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_tail)
+ ret = ring->ops->push_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);
+
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
+ ring->state.free, ring->state.windex);
+
+ if (k3_ringacc_ring_is_full(ring))
+ return -ENOMEM;
+
+ if (ring->ops && ring->ops->push_head)
+ ret = ring->ops->push_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);
+
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
+ ring->state.rindex);
+
+ if (!ring->state.occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_head)
+ ret = ring->ops->pop_head(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);
+
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
+ return -EINVAL;
+
+ if (!ring->state.occ)
+ ring->state.occ = k3_ringacc_ring_get_occ(ring);
+
+ dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
+ ring->state.occ, ring->state.rindex);
+
+ if (!ring->state.occ)
+ return -ENODATA;
+
+ if (ring->ops && ring->ops->pop_tail)
+ ret = ring->ops->pop_tail(ring, elem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);
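Together these exports form the element interface clients use regardless of ring mode: push returns -ENOMEM on a full ring, pop returns -ENODATA on an empty one, and the actual transfer is dispatched through the mode-specific ops. A hedged consumer sketch; the ring handles are assumed to be already requested and the 16-byte element layout is purely illustrative:

#include <linux/soc/ti/k3-ringacc.h>

static int example_xfer(struct k3_ring *tx_ring, struct k3_ring *rx_ring)
{
	u64 elem[2] = { 0 };	/* illustrative 16-byte ring element */
	int ret;

	ret = k3_ringacc_ring_push(tx_ring, elem);	/* tail push */
	if (ret)
		return ret;

	return k3_ringacc_ring_pop(rx_ring, elem);	/* head pop */
}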
+
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct device_node *ringacc_np;
+ struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
+ struct k3_ringacc *entry;
+
+ ringacc_np = of_parse_phandle(np, property, 0);
+ if (!ringacc_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_for_each_entry(entry, &k3_ringacc_list, list)
+ if (entry->dev->of_node == ringacc_np) {
+ ringacc = entry;
+ break;
+ }
+ mutex_unlock(&k3_ringacc_list_lock);
+ of_node_put(ringacc_np);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
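This is how client drivers resolve a ringacc reference from their own DT node; until the provider has probed and added itself to k3_ringacc_list, the caller sees -EPROBE_DEFER. A hedged sketch of the client side, assuming a hypothetical "ti,ringacc" property:

static int example_client_probe(struct device *dev)
{
	struct k3_ringacc *ringacc;

	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	if (IS_ERR(ringacc))
		return PTR_ERR(ringacc);	/* includes -EPROBE_DEFER */

	return 0;
}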
+
+static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
+{
+ struct device_node *node = ringacc->dev->of_node;
+ struct device *dev = ringacc->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
+ if (ret) {
+ dev_err(dev, "ti,num-rings read failure %d\n", ret);
+ return ret;
+ }
+
+ ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
+ if (IS_ERR(ringacc->tisci)) {
+ ret = PTR_ERR(ringacc->tisci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ ringacc->tisci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,sci-dev-id",
+ &ringacc->tisci_dev_id);
+ if (ret) {
+ dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
+ return ret;
+ }
+
+ pdev->id = ringacc->tisci_dev_id;
+
+ ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
+ ringacc->tisci_dev_id,
+ "ti,sci-rm-range-gp-rings");
+ if (IS_ERR(ringacc->rm_gp_range)) {
+ dev_err(dev, "Failed to allocate MSI interrupts\n");
+ return PTR_ERR(ringacc->rm_gp_range);
+ }
+
+ return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
+ ringacc->rm_gp_range);
+}
+
+static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
+ .dma_ring_reset_quirk = 1,
+};
+
+static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR1.0",
+ .data = &k3_ringacc_soc_data_sr1
+ },
+ {/* sentinel */}
+};
+
+static int k3_ringacc_init(struct platform_device *pdev,
+ struct k3_ringacc *ringacc)
+{
+ const struct soc_device_attribute *soc;
+ void __iomem *base_fifo, *base_rt;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, i;
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_TI_SCI_INTA_MSI);
+ if (!dev->msi_domain) {
+ dev_err(dev, "Failed to get MSI domain\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = k3_ringacc_probe_dt(ringacc);
+ if (ret)
+ return ret;
+
+ soc = soc_device_match(k3_ringacc_socinfo);
+ if (soc && soc->data) {
+ const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return PTR_ERR(base_rt);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
+ base_fifo = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_fifo))
+ return PTR_ERR(base_fifo);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
+ ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_gcfg))
+ return PTR_ERR(ringacc->proxy_gcfg);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "proxy_target");
+ ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ringacc->proxy_target_base))
+ return PTR_ERR(ringacc->proxy_target_base);
+
+ ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
+ K3_RINGACC_PROXY_CFG_THREADS_MASK;
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+ ringacc->proxy_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_proxies),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
+ return -ENOMEM;
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ ringacc->rings[i].rt = base_rt +
+ K3_RINGACC_RT_REGS_STEP * i;
+ ringacc->rings[i].fifos = base_fifo +
+ K3_RINGACC_FIFO_REGS_STEP * i;
+ ringacc->rings[i].parent = ringacc;
+ ringacc->rings[i].ring_id = i;
+ ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
+ ringacc->num_rings,
+ ringacc->rm_gp_range->desc[0].start,
+ ringacc->rm_gp_range->desc[0].num,
+ ringacc->tisci_dev_id);
+ dev_info(dev, "dma-ring-reset-quirk: %s\n",
+ ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
+ dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
+ readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
+
+ return 0;
+}
+
+struct ringacc_match_data {
+ struct k3_ringacc_ops ops;
+};
+
+static struct ringacc_match_data k3_ringacc_data = {
+ .ops = {
+ .init = k3_ringacc_init,
+ },
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id k3_ringacc_of_match[] = {
+ { .compatible = "ti,am654-navss-ringacc", .data = &k3_ringacc_data, },
+ {},
+};
+
+static int k3_ringacc_probe(struct platform_device *pdev)
+{
+ const struct ringacc_match_data *match_data;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ int ret;
+
+ match = of_match_node(k3_ringacc_of_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ match_data = match->data;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return -ENOMEM;
+
+ ringacc->dev = dev;
+ mutex_init(&ringacc->req_lock);
+ ringacc->ops = &match_data->ops;
+
+ ret = ringacc->ops->init(pdev, ringacc);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dev, ringacc);
+
+ mutex_lock(&k3_ringacc_list_lock);
+ list_add_tail(&ringacc->list, &k3_ringacc_list);
+ mutex_unlock(&k3_ringacc_list_lock);
+
+ return 0;
+}
+
+static struct platform_driver k3_ringacc_driver = {
+ .probe = k3_ringacc_probe,
+ .driver = {
+ .name = "k3-ringacc",
+ .of_match_table = k3_ringacc_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(k3_ringacc_driver);
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
new file mode 100644
index 0000000..bbbc2d2
--- /dev/null
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 SoC info driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+#define CTRLMMR_WKUP_JTAGID_REG 0
+/*
+ * Bits:
+ * 31-28 VARIANT Device variant
+ * 27-12 PARTNO Part number
+ * 11-1 MFG Indicates TI as manufacturer (0x17)
+ * 0 Always 1
+ */
+#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
+#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
+
+#define CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT (12)
+#define CTRLMMR_WKUP_JTAGID_PARTNO_MASK GENMASK(27, 12)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_SHIFT (1)
+#define CTRLMMR_WKUP_JTAGID_MFG_MASK GENMASK(11, 1)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+
+static const struct k3_soc_id {
+ unsigned int id;
+ const char *family_name;
+} k3_soc_ids[] = {
+ { 0xBB5A, "AM65X" },
+ { 0xBB64, "J721E" },
+ { 0xBB6D, "J7200" },
+};
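As a worked example of the decode below: an AM65x SR1.0 device reads back JTAGID 0x0BB5A02F, which splits into VARIANT = 0x0 (bits 31:28, printed as "SR1.0" after the +1 adjustment in probe), PARTNO = 0xBB5A (bits 27:12, matched against this table), MFG = 0x17 (bits 11:1, TI) and bit 0 = 1. In shift/mask form:

/* Worked decode of an AM65x SR1.0 JTAGID, built from the masks above */
u32 jtag_id = 0x0BB5A02F;
u32 variant = (jtag_id >> 28) & 0xf;	/* 0x0    -> revision "SR1.0" */
u32 partno  = (jtag_id >> 12) & 0xffff;	/* 0xBB5A -> family "AM65X" */
u32 mfg     = (jtag_id >> 1)  & 0x7ff;	/* 0x17   -> manufacturer TI */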
+
+static int
+k3_chipinfo_partno_to_names(unsigned int partno,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(k3_soc_ids); i++)
+ if (partno == k3_soc_ids[i].id) {
+ soc_dev_attr->family = k3_soc_ids[i].family_name;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int k3_chipinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
+ struct soc_device *soc_dev;
+ struct regmap *regmap;
+ u32 partno_id;
+ u32 variant;
+ u32 jtag_id;
+ u32 mfg;
+ int ret;
+
+ regmap = device_node_to_regmap(node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CTRLMMR_WKUP_JTAGID_REG, &jtag_id);
+ if (ret < 0)
+ return ret;
+
+ mfg = (jtag_id & CTRLMMR_WKUP_JTAGID_MFG_MASK) >>
+ CTRLMMR_WKUP_JTAGID_MFG_SHIFT;
+
+ if (mfg != CTRLMMR_WKUP_JTAGID_MFG_TI) {
+ dev_err(dev, "Invalid MFG SoC\n");
+ return -ENODEV;
+ }
+
+ variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
+ CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
+ variant++;
+
+ partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
+ CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
+ ret = -ENODEV;
+ goto err_free_rev;
+ }
+
+ node = of_find_node_by_path("/");
+ of_property_read_string(node, "model", &soc_dev_attr->machine);
+ of_node_put(node);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_free_rev;
+ }
+
+ dev_info(dev, "Family:%s rev:%s JTAGID[0x%08x] Detected\n",
+ soc_dev_attr->family,
+ soc_dev_attr->revision, jtag_id);
+
+ return 0;
+
+err_free_rev:
+ kfree(soc_dev_attr->revision);
+err:
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+static const struct of_device_id k3_chipinfo_of_match[] = {
+ { .compatible = "ti,am654-chipid", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k3_chipinfo_driver = {
+ .driver = {
+ .name = "k3-chipinfo",
+ .of_match_table = k3_chipinfo_of_match,
+ },
+ .probe = k3_chipinfo_probe,
+};
+
+static int __init k3_chipinfo_init(void)
+{
+ return platform_driver_register(&k3_chipinfo_driver);
+}
+subsys_initcall(k3_chipinfo_init);
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 981a901..56597f6 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -355,7 +355,7 @@
}
}
-static int dma_debug_show(struct seq_file *s, void *v)
+static int knav_dma_debug_show(struct seq_file *s, void *v)
{
struct knav_dma_device *dma;
@@ -370,17 +370,7 @@
return 0;
}
-static int knav_dma_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dma_debug_show, NULL);
-}
-
-static const struct file_operations knav_dma_debug_ops = {
- .open = knav_dma_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
static int of_channel_match_helper(struct device_node *np, const char *name,
const char **dma_instance)
@@ -780,7 +770,7 @@
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_dma_debug_ops);
+ &knav_dma_debug_fops);
device_ready = true;
return ret;
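The knav_dma conversion above relies on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the single_open() boilerplate that was previously spelled out by hand; it derives both the open callback and the _fops name from the _show suffix, which is why the debugfs registration now references knav_dma_debug_fops. The macro expands to approximately:

static int knav_dma_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_dma_debug_show, inode->i_private);
}

static const struct file_operations knav_dma_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= knav_dma_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};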
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index 038aec3..a01eda7 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -67,7 +67,7 @@
u32 link_ram_size0;
u32 link_ram_base1;
u32 __pad2[2];
- u32 starvation[0];
+ u32 starvation[];
};
struct knav_reg_region {
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 1762d89..fde66e2 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -450,7 +450,7 @@
return 0;
}
-struct knav_range_ops knav_acc_range_ops = {
+static struct knav_range_ops knav_acc_range_ops = {
.set_notify = knav_acc_set_notify,
.init_queue = knav_acc_init_queue,
.open_queue = knav_acc_open_queue,
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b821047..53e36d4 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -25,6 +25,8 @@
static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
+#define knav_dev_lock_held() \
+ lockdep_is_held(&knav_dev_lock)
/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX 0
@@ -52,8 +54,9 @@
#define knav_queue_idx_to_inst(kdev, idx) \
(kdev->instances + (idx << kdev->inst_shift))
-#define for_each_handle_rcu(qh, inst) \
- list_for_each_entry_rcu(qh, &inst->handles, list)
+#define for_each_handle_rcu(qh, inst) \
+ list_for_each_entry_rcu(qh, &inst->handles, list, \
+ knav_dev_lock_held())
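The extra argument threaded through for_each_handle_rcu() is the optional lockdep condition that list_for_each_entry_rcu() accepts: iteration is then legal either inside an RCU read-side critical section or with knav_dev_lock held, and lockdep flags anything else. The two legal call patterns, sketched with qh/inst as in the surrounding code:

/* Reader side */
rcu_read_lock();
for_each_handle_rcu(qh, inst) {
	/* ... read-side use of qh ... */
}
rcu_read_unlock();

/* Updater side, satisfied by the knav_dev_lock_held() condition */
mutex_lock(&knav_dev_lock);
for_each_handle_rcu(qh, inst) {
	/* ... update-side use of qh ... */
}
mutex_unlock(&knav_dev_lock);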
#define for_each_instance(idx, inst, kdev) \
for (idx = 0, inst = kdev->instances; \
@@ -406,7 +409,7 @@
return 0;
}
-struct knav_range_ops knav_gp_range_ops = {
+static struct knav_range_ops knav_gp_range_ops = {
.set_notify = knav_gp_set_notify,
.open_queue = knav_gp_open_queue,
.close_queue = knav_gp_close_queue,
@@ -475,17 +478,7 @@
return 0;
}
-static int knav_queue_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, knav_queue_debug_show, NULL);
-}
-
-static const struct file_operations knav_queue_debug_ops = {
- .open = knav_queue_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
u32 flags)
@@ -1877,7 +1870,7 @@
}
debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_queue_debug_ops);
+ &knav_queue_debug_fops);
device_ready = true;
return 0;
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
new file mode 100644
index 0000000..4a782bf
--- /dev/null
+++ b/drivers/soc/ti/omap_prm.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP2+ PRM driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/delay.h>
+
+#include <linux/platform_data/ti-prm.h>
+
+enum omap_prm_domain_mode {
+ OMAP_PRMD_OFF,
+ OMAP_PRMD_RETENTION,
+ OMAP_PRMD_ON_INACTIVE,
+ OMAP_PRMD_ON_ACTIVE,
+};
+
+struct omap_prm_domain_map {
+ unsigned int usable_modes; /* Mask of hardware supported modes */
+ unsigned long statechange:1; /* Optional low-power state change */
+ unsigned long logicretstate:1; /* Optional logic off mode */
+};
+
+struct omap_prm_domain {
+ struct device *dev;
+ struct omap_prm *prm;
+ struct generic_pm_domain pd;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *cap;
+ u32 pwrstctrl_saved;
+};
+
+struct omap_rst_map {
+ s8 rst;
+ s8 st;
+};
+
+struct omap_prm_data {
+ u32 base;
+ const char *name;
+ const char *clkdm_name;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *dmap;
+ u16 rstctrl;
+ u16 rstst;
+ const struct omap_rst_map *rstmap;
+ u8 flags;
+};
+
+struct omap_prm {
+ const struct omap_prm_data *data;
+ void __iomem *base;
+ struct omap_prm_domain *prmd;
+};
+
+struct omap_reset_data {
+ struct reset_controller_dev rcdev;
+ struct omap_prm *prm;
+ u32 mask;
+ spinlock_t lock;
+ struct clockdomain *clkdm;
+ struct device *dev;
+};
+
+#define genpd_to_prm_domain(gpd) container_of(gpd, struct omap_prm_domain, pd)
+#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
+
+#define OMAP_MAX_RESETS 8
+#define OMAP_RESET_MAX_WAIT 10000
+
+#define OMAP_PRM_HAS_RSTCTRL BIT(0)
+#define OMAP_PRM_HAS_RSTST BIT(1)
+#define OMAP_PRM_HAS_NO_CLKDM BIT(2)
+
+#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
+
+#define PRM_STATE_MAX_WAIT 10000
+#define PRM_LOGICRETSTATE BIT(2)
+#define PRM_LOWPOWERSTATECHANGE BIT(4)
+#define PRM_POWERSTATE_MASK OMAP_PRMD_ON_ACTIVE
+
+#define PRM_ST_INTRANSITION BIT(20)
+
+static const struct omap_prm_domain_map omap_prm_all = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_noinact = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
+ BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_nooff = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+};
+
+static const struct omap_rst_map rst_map_0[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_01[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map rst_map_012[] = {
+ { .rst = 0, .st = 0 },
+ { .rst = 1, .st = 1 },
+ { .rst = 2, .st = 2 },
+ { .rst = -1 },
+};
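Each omap_rst_map entry pairs a reset-control bit (rst, the RSTCTRL bit a consumer toggles) with its status bit (st, the RSTST bit that latches completion), with .rst = -1 terminating the table; omap_reset_get_st_bit() further down returns the mapped st for a matching rst and falls back to the id itself when no entry matches. A lookup sketch over rst_map_01:

/* Sketch: resolve the status bit for reset line 1 via rst_map_01 */
static int example_st_bit(void)
{
	const struct omap_rst_map *map = rst_map_01;
	int id = 1;

	for (; map->rst >= 0; map++)
		if (map->rst == id)
			return map->st;	/* here: 1 */

	return id;	/* fallback: status bit mirrors the reset bit */
}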
+
+static const struct omap_prm_data omap4_prm_data[] = {
+ { .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4a306500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
+ },
+ { .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
+ { .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct omap_prm_data omap5_prm_data[] = {
+ { .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4ae06500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
+ },
+ { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
+ { .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct omap_prm_data dra7_prm_data[] = {
+ { .name = "dsp1", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "ipu", .base = 0x4ae06500, .rstctrl = 0x10, .rstst = 0x14, .clkdm_name = "ipu1", .rstmap = rst_map_012 },
+ { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu2", .rstmap = rst_map_012 },
+ { .name = "iva", .base = 0x4ae06f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
+ { .name = "dsp2", .base = 0x4ae07b00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve1", .base = 0x4ae07b40, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve2", .base = 0x4ae07b80, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve3", .base = 0x4ae07bc0, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { .name = "eve4", .base = 0x4ae07c00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ { },
+};
+
+static const struct omap_rst_map am3_per_rst_map[] = {
+ { .rst = 1 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am3_wkup_rst_map[] = {
+ { .rst = 3, .st = 5 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am3_prm_data[] = {
+ { .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
+ { .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "gfx", .base = 0x44e01100,
+ .pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
+ { },
+};
+
+static const struct omap_rst_map am4_per_rst_map[] = {
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_rst_map am4_device_rst_map[] = {
+ { .rst = 0, .st = 1 },
+ { .rst = 1, .st = 0 },
+ { .rst = -1 },
+};
+
+static const struct omap_prm_data am4_prm_data[] = {
+ {
+ .name = "gfx", .base = 0x44df0400,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
+ { .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
+ { .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
+ { .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ { },
+};
+
+static const struct of_device_id omap_prm_id_table[] = {
+ { .compatible = "ti,omap4-prm-inst", .data = omap4_prm_data },
+ { .compatible = "ti,omap5-prm-inst", .data = omap5_prm_data },
+ { .compatible = "ti,dra7-prm-inst", .data = dra7_prm_data },
+ { .compatible = "ti,am3-prm-inst", .data = am3_prm_data },
+ { .compatible = "ti,am4-prm-inst", .data = am4_prm_data },
+ { },
+};
+
+#ifdef DEBUG
+static void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+ dev_dbg(prmd->dev, "%s %s: %08x/%08x\n",
+ prmd->pd.name, desc,
+ readl_relaxed(prmd->prm->base + prmd->pwrstctrl),
+ readl_relaxed(prmd->prm->base + prmd->pwrstst));
+}
+#else
+static inline void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+}
+#endif
+
+static int omap_prm_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "on: previous state");
+
+ if (prmd->pwrstctrl_saved)
+ v = prmd->pwrstctrl_saved;
+ else
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+
+ writel_relaxed(v | OMAP_PRMD_ON_ACTIVE,
+ prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_err(prmd->dev, "%s: %s timed out\n",
+ prmd->pd.name, __func__);
+
+ omap_prm_domain_show_state(prmd, "on: new state");
+
+ return ret;
+}
+
+/* No need to check for holes in the mask for the lowest mode */
+static int omap_prm_domain_find_lowest(struct omap_prm_domain *prmd)
+{
+ return __ffs(prmd->cap->usable_modes);
+}
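Because the mode values are ordered from OFF (0) upward, the lowest permitted state is simply the least significant set bit of the capability mask. Worked against the maps above: omap_prm_noinact has usable_modes = BIT(3) | BIT(1) | BIT(0) = 0xb, so __ffs() yields 0 (OMAP_PRMD_OFF), while omap_prm_nooff has BIT(3) | BIT(2) | BIT(1) = 0xe, yielding 1 (OMAP_PRMD_RETENTION):

__ffs(0xb);	/* omap_prm_noinact -> 0 = OMAP_PRMD_OFF */
__ffs(0xe);	/* omap_prm_nooff   -> 1 = OMAP_PRMD_RETENTION */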
+
+static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "off: previous state");
+
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+ prmd->pwrstctrl_saved = v;
+
+ v &= ~PRM_POWERSTATE_MASK;
+ v |= omap_prm_domain_find_lowest(prmd);
+
+ if (prmd->cap->statechange)
+ v |= PRM_LOWPOWERSTATECHANGE;
+ if (prmd->cap->logicretstate)
+ v &= ~PRM_LOGICRETSTATE;
+ else
+ v |= PRM_LOGICRETSTATE;
+
+ writel_relaxed(v, prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_warn(prmd->dev, "%s: %s timed out\n",
+ __func__, prmd->pd.name);
+
+ omap_prm_domain_show_state(prmd, "off: new state");
+
+ return 0;
+}
+
+static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+ struct of_phandle_args pd_args;
+ struct omap_prm_domain *prmd;
+ struct device_node *np;
+ int ret;
+
+ prmd = genpd_to_prm_domain(domain);
+ np = dev->of_node;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 0)
+ dev_warn(dev, "%s: unusupported #power-domain-cells: %i\n",
+ prmd->pd.name, pd_args.args_count);
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+
+ return 0;
+}
+
+static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+}
+
+static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+{
+ struct omap_prm_domain *prmd;
+ struct device_node *np = dev->of_node;
+ const struct omap_prm_data *data;
+ const char *name;
+ int error;
+
+ if (!of_find_property(dev->of_node, "#power-domain-cells", NULL))
+ return 0;
+
+ of_node_put(dev->of_node);
+
+ prmd = devm_kzalloc(dev, sizeof(*prmd), GFP_KERNEL);
+ if (!prmd)
+ return -ENOMEM;
+
+ data = prm->data;
+ name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ data->name);
+
+ prmd->dev = dev;
+ prmd->prm = prm;
+ prmd->cap = prmd->prm->data->dmap;
+ prmd->pwrstctrl = prmd->prm->data->pwrstctrl;
+ prmd->pwrstst = prmd->prm->data->pwrstst;
+
+ prmd->pd.name = name;
+ prmd->pd.power_on = omap_prm_domain_power_on;
+ prmd->pd.power_off = omap_prm_domain_power_off;
+ prmd->pd.attach_dev = omap_prm_domain_attach_dev;
+ prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+
+ pm_genpd_init(&prmd->pd, NULL, true);
+ error = of_genpd_add_provider_simple(np, &prmd->pd);
+ if (error)
+ pm_genpd_remove(&prmd->pd);
+ else
+ prm->prmd = prmd;
+
+ return error;
+}
+
+static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
+{
+ if (reset->mask & BIT(id))
+ return true;
+
+ return false;
+}
+
+static int omap_reset_get_st_bit(struct omap_reset_data *reset,
+ unsigned long id)
+{
+ const struct omap_rst_map *map = reset->prm->data->rstmap;
+
+ while (map->rst >= 0) {
+ if (map->rst == id)
+ return map->st;
+
+ map++;
+ }
+
+ return id;
+}
+
+static int omap_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit = omap_reset_get_st_bit(reset, id);
+ bool has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ /* Check if we have rstst */
+ if (!has_rstst)
+ return -ENOTSUPP;
+
+ /* Check if hw reset line is asserted */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if (v & BIT(id))
+ return 1;
+
+ /*
+ * Check reset status, high value means reset sequence has been
+ * completed successfully so we can return 0 here (reset deasserted)
+ */
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstst);
+ v >>= st_bit;
+ v &= 1;
+
+ return !v;
+}
+
+static int omap_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ unsigned long flags;
+
+ /* assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v |= 1 << id;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ return 0;
+}
+
+static int omap_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+ u32 v;
+ int st_bit;
+ bool has_rstst;
+ unsigned long flags;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
+ int ret = 0;
+
+ /* Nothing to do if the reset is already deasserted */
+ if (!omap_reset_status(rcdev, id))
+ return 0;
+
+ has_rstst = reset->prm->data->rstst ||
+ (reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
+
+ if (has_rstst) {
+ st_bit = omap_reset_get_st_bit(reset, id);
+
+ /* Clear the reset status by writing 1 to the status bit */
+ v = 1 << st_bit;
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstst);
+ }
+
+ if (reset->clkdm)
+ pdata->clkdm_deny_idle(reset->clkdm);
+
+ /* de-assert the reset control line */
+ spin_lock_irqsave(&reset->lock, flags);
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ v &= ~(1 << id);
+ writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl);
+ spin_unlock_irqrestore(&reset->lock, flags);
+
+ /* wait for the reset bit to clear */
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstctrl,
+ v, !(v & BIT(id)), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+
+ /* wait for the status to be set */
+ if (has_rstst) {
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstst,
+ v, v & BIT(st_bit), 1,
+ OMAP_RESET_MAX_WAIT);
+ if (ret)
+ pr_err("%s: timedout waiting for %s:%lu\n", __func__,
+ reset->prm->data->name, id);
+ }
+
+ if (reset->clkdm)
+ pdata->clkdm_allow_idle(reset->clkdm);
+
+ return ret;
+}
+
+static const struct reset_control_ops omap_reset_ops = {
+ .assert = omap_reset_assert,
+ .deassert = omap_reset_deassert,
+ .status = omap_reset_status,
+};
+
+static int omap_prm_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ struct omap_reset_data *reset = to_omap_reset_data(rcdev);
+
+ if (!_is_valid_reset(reset, reset_spec->args[0]))
+ return -EINVAL;
+
+ return reset_spec->args[0];
+}
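The xlate hook validates the consumer-supplied line number against the per-instance mask before returning it verbatim as the reset id. On the consumer side this is the standard reset framework flow; a hedged sketch (device and line binding hypothetical):

/* Hypothetical consumer of an omap_prm reset line */
static int example_reset_cycle(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* sets the RSTCTRL bit */
	return reset_control_deassert(rst); /* clears it, polls RSTCTRL/RSTST */
}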
+
+static int omap_prm_reset_init(struct platform_device *pdev,
+ struct omap_prm *prm)
+{
+ struct omap_reset_data *reset;
+ const struct omap_rst_map *map;
+ struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ char buf[32];
+ u32 v;
+
+ /*
+ * Check if we have controllable resets. If either rstctrl is non-zero
+ * or OMAP_PRM_HAS_RSTCTRL flag is set, we have reset control register
+ * for the domain.
+ */
+ if (!prm->data->rstctrl && !(prm->data->flags & OMAP_PRM_HAS_RSTCTRL))
+ return 0;
+
+ /* Check if we have the pdata callbacks in place */
+ if (!pdata || !pdata->clkdm_lookup || !pdata->clkdm_deny_idle ||
+ !pdata->clkdm_allow_idle)
+ return -EINVAL;
+
+ map = prm->data->rstmap;
+ if (!map)
+ return -EINVAL;
+
+ reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.ops = &omap_reset_ops;
+ reset->rcdev.of_node = pdev->dev.of_node;
+ reset->rcdev.nr_resets = OMAP_MAX_RESETS;
+ reset->rcdev.of_xlate = omap_prm_reset_xlate;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->dev = &pdev->dev;
+ spin_lock_init(&reset->lock);
+
+ reset->prm = prm;
+
+ sprintf(buf, "%s_clkdm", prm->data->clkdm_name ? prm->data->clkdm_name :
+ prm->data->name);
+
+ if (!(prm->data->flags & OMAP_PRM_HAS_NO_CLKDM)) {
+ reset->clkdm = pdata->clkdm_lookup(buf);
+ if (!reset->clkdm)
+ return -EINVAL;
+ }
+
+ while (map->rst >= 0) {
+ reset->mask |= BIT(map->rst);
+ map++;
+ }
+
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
+ return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
+}
+
+static int omap_prm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct omap_prm_data *data;
+ struct omap_prm *prm;
+ const struct of_device_id *match;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ match = of_match_device(omap_prm_id_table, &pdev->dev);
+ if (!match)
+ return -ENOTSUPP;
+
+ prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
+ if (!prm)
+ return -ENOMEM;
+
+ data = match->data;
+
+ while (data->base != res->start) {
+ if (!data->base)
+ return -EINVAL;
+ data++;
+ }
+
+ prm->data = data;
+
+ prm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(prm->base))
+ return PTR_ERR(prm->base);
+
+ ret = omap_prm_domain_init(&pdev->dev, prm);
+ if (ret)
+ return ret;
+
+ ret = omap_prm_reset_init(pdev, prm);
+ if (ret)
+ goto err_domain;
+
+ return 0;
+
+err_domain:
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&prm->prmd->pd);
+
+ return ret;
+}
+
+static struct platform_driver omap_prm_driver = {
+ .probe = omap_prm_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = omap_prm_id_table,
+ },
+};
+builtin_platform_driver(omap_prm_driver);
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index ccc6d53..dc21aa8 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -39,6 +40,8 @@
#define GIC_INT_SET_PENDING_BASE 0x200
#define AM43XX_GIC_DIST_BASE 0x48241000
+static void __iomem *rtc_base_virt;
+static struct clk *rtc_fck;
static u32 rtc_magic_val;
static int (*am33xx_do_wfi_sram)(unsigned long unused);
@@ -90,7 +93,7 @@
ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
ro_sram_data.amx3_pm_sram_data_phys =
gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
- ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+ ro_sram_data.rtc_base_virt = rtc_base_virt;
/* Save physical address to calculate resume offset during pm init */
am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
@@ -130,6 +133,19 @@
return 0;
}
+static int am33xx_do_sram_idle(u32 wfi_flags)
+{
+ int ret = 0;
+
+ if (!m3_ipc || !pm_ops)
+ return 0;
+
+ if (wfi_flags & WFI_FLAG_WAKE_M3)
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+
+ return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
+}
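This helper is what the probe path below hands to the platform code via pm_ops->init(am33xx_do_sram_idle), so the idle path can optionally wake the wkup_m3 co-processor before suspending through the SRAM-resident WFI routine. A hedged caller sketch; the flag combination is illustrative and assumes the WFI_FLAG_* values from the pm33xx platform data header:

static int example_enter_deep_idle(void)
{
	/* notify the wkup_m3 first, then suspend via the SRAM wfi code */
	return am33xx_do_sram_idle(WFI_FLAG_WAKE_M3 | WFI_FLAG_SELF_REFRESH);
}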
+
static int __init am43xx_map_gic(void)
{
gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
@@ -145,7 +161,7 @@
{
u32 i;
- i = __raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40;
+ i = __raw_readl(rtc_base_virt + 0x44) & 0x40;
if (i) {
retrigger_irq = rtc_alarm_wakeup.irq_nr;
@@ -164,13 +180,24 @@
return 0;
}
+/*
+ * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
+ * The module can stay in the SYSC_IDLE_SMART_WKUP mode configured
+ * by the interconnect code just fine for both rtc+ddr suspend and retention
+ * suspend.
+ */
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
if (suspend_state == PM_SUSPEND_MEM &&
pm_ops->check_off_mode_enable()) {
- pm_ops->prepare_rtc_suspend();
+ ret = clk_prepare_enable(rtc_fck);
+ if (ret) {
+ dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
+ return ret;
+ }
+
pm_ops->save_context();
suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
clk_save_context();
@@ -223,7 +250,7 @@
}
if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
- pm_ops->prepare_rtc_resume();
+ clk_disable_unprepare(rtc_fck);
return ret;
}
@@ -260,6 +287,8 @@
rtc_only_idle = 0;
}
+ pm_ops->begin_suspend();
+
switch (state) {
case PM_SUSPEND_MEM:
ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
@@ -301,6 +330,8 @@
}
rtc_only_idle = 0;
+
+ pm_ops->finish_suspend();
}
static int am33xx_pm_valid(suspend_state_t state)
@@ -408,14 +439,28 @@
struct device_node *np;
unsigned long val = 0;
struct nvmem_device *nvmem;
+ int error;
np = of_find_node_by_name(NULL, "rtc");
if (of_device_is_available(np)) {
+ /* RTC interconnect target module clock */
+ rtc_fck = of_clk_get_by_name(np->parent, "fck");
+ if (IS_ERR(rtc_fck))
+ return PTR_ERR(rtc_fck);
+
+ rtc_base_virt = of_iomap(np, 0);
+ if (!rtc_base_virt) {
+ pr_warn("PM: could not iomap rtc");
+ error = -ENODEV;
+ goto err_clk_put;
+ }
+
omap_rtc = rtc_class_open("rtc0");
if (!omap_rtc) {
pr_warn("PM: rtc0 not available");
- return -EPROBE_DEFER;
+ error = -EPROBE_DEFER;
+ goto err_iounmap;
}
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
@@ -437,6 +482,13 @@
}
return 0;
+
+err_iounmap:
+ iounmap(rtc_base_virt);
+err_clk_put:
+ clk_put(rtc_fck);
+
+ return error;
}
static int am33xx_pm_probe(struct platform_device *pdev)
@@ -484,7 +536,7 @@
ret = am33xx_push_sram_idle();
if (ret)
- goto err_free_sram;
+ goto err_unsetup_rtc;
am33xx_pm_set_ipc_ops();
@@ -503,7 +555,7 @@
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
- ret = pm_ops->init();
+ ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
@@ -514,6 +566,9 @@
err_put_wkup_m3_ipc:
wkup_m3_ipc_put(m3_ipc);
+err_unsetup_rtc:
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
err_free_sram:
am33xx_pm_free_sram();
pm33xx_dev = NULL;
@@ -522,9 +577,13 @@
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ if (pm_ops->deinit)
+ pm_ops->deinit();
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
return 0;
}
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
new file mode 100644
index 0000000..3069517
--- /dev/null
+++ b/drivers/soc/ti/pruss.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS platform driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pruss_driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct pruss_private_data - PRUSS driver private data
+ * @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
+ * @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ */
+struct pruss_private_data {
+ bool has_no_sharedram;
+ bool has_core_mux_clock;
+};
+
+static void pruss_of_free_clk_provider(void *data)
+{
+ struct device_node *clk_mux_np = data;
+
+ of_clk_del_provider(clk_mux_np);
+ of_node_put(clk_mux_np);
+}
+
+static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux,
+ char *mux_name, struct device_node *clks_np)
+{
+ struct device_node *clk_mux_np;
+ struct device *dev = pruss->dev;
+ char *clk_mux_name;
+ unsigned int num_parents;
+ const char **parent_names;
+ void __iomem *reg;
+ u32 reg_offset;
+ int ret;
+
+ clk_mux_np = of_get_child_by_name(clks_np, mux_name);
+ if (!clk_mux_np) {
+ dev_err(dev, "%pOF is missing its '%s' node\n", clks_np,
+ mux_name);
+ return -ENODEV;
+ }
+
+ num_parents = of_clk_get_parent_count(clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(dev, "mux-clock %pOF must have parents\n", clk_mux_np);
+ ret = -EINVAL;
+ goto put_clk_mux_np;
+ }
+
+ parent_names = devm_kcalloc(dev, num_parents, sizeof(*parent_names),
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ of_clk_parent_fill(clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(dev), clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ ret = of_property_read_u32(clk_mux_np, "reg", ®_offset);
+ if (ret)
+ goto put_clk_mux_np;
+
+ reg = pruss->cfg_base + reg_offset;
+
+ clk_mux = clk_register_mux(NULL, clk_mux_name, parent_names,
+ num_parents, 0, reg, 0, 1, 0, NULL);
+ if (IS_ERR(clk_mux)) {
+ ret = PTR_ERR(clk_mux);
+ goto put_clk_mux_np;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))clk_unregister_mux,
+ clk_mux);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux unregister action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ ret = of_clk_add_provider(clk_mux_np, of_clk_src_simple_get, clk_mux);
+ if (ret)
+ goto put_clk_mux_np;
+
+ ret = devm_add_action_or_reset(dev, pruss_of_free_clk_provider,
+ clk_mux_np);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux free action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ return 0;
+
+put_clk_mux_np:
+ of_node_put(clk_mux_np);
+ return ret;
+}
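For reference, the clk_register_mux() call above registers a single-bit mux: after the flags word the parameters are the MMIO address, the field shift (0), the field width (1, giving two selectable parents), mux flags and an optional spinlock. A sketch of the selector decode the resulting clk performs, assuming bit 0 of the CFG-relative register holds the selection:

static unsigned int example_mux_sel(void __iomem *reg)
{
	/* shift = 0, width = 1 -> index 0 or 1 into parent_names[] */
	return (readl(reg) >> 0) & 0x1;
}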
+
+static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
+{
+ const struct pruss_private_data *data;
+ struct device_node *clks_np;
+ struct device *dev = pruss->dev;
+ int ret = 0;
+
+ data = of_device_get_match_data(dev);
+ if (IS_ERR(data))
+ return -ENODEV;
+
+ clks_np = of_get_child_by_name(cfg_node, "clocks");
+ if (!clks_np) {
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
+ return -ENODEV;
+ }
+
+ if (data && data->has_core_mux_clock) {
+ ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
+ "coreclk-mux", clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto put_clks_node;
+ }
+ }
+
+ ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
+ clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup iepclk-mux\n");
+ goto put_clks_node;
+ }
+
+put_clks_node:
+ of_node_put(clks_np);
+
+ return ret;
+}
+
+static struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int pruss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct pruss *pruss;
+ struct resource res;
+ int ret, i, index;
+ const struct pruss_private_data *data;
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (IS_ERR(data)) {
+ dev_err(dev, "missing private data\n");
+ return -ENODEV;
+ }
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set the DMA coherent mask");
+ return ret;
+ }
+
+ pruss = devm_kzalloc(dev, sizeof(*pruss), GFP_KERNEL);
+ if (!pruss)
+ return -ENOMEM;
+
+ pruss->dev = dev;
+
+ child = of_get_child_by_name(np, "memories");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'memories' node\n", child);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ /*
+ * On AM437x one of the two PRUSS units doesn't contain Shared RAM,
+ * skip it
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0) {
+ of_node_put(child);
+ return index;
+ }
+
+ if (of_address_to_resource(child, index, &res)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ of_node_put(child);
+ return -ENOMEM;
+ }
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+ of_node_put(child);
+
+ platform_set_drvdata(pdev, pruss);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "couldn't enable module\n");
+ pm_runtime_put_noidle(dev);
+ goto rpm_disable;
+ }
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
+ ret = -ENODEV;
+ goto rpm_put;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto node_put;
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "failed to register child devices\n");
+ goto node_put;
+ }
+
+ of_node_put(child);
+
+ return 0;
+
+node_put:
+ of_node_put(child);
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int pruss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ devm_of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/* instance-specific driver private data */
+static const struct pruss_private_data am437x_pruss1_data = {
+ .has_no_sharedram = false,
+};
+
+static const struct pruss_private_data am437x_pruss0_data = {
+ .has_no_sharedram = true,
+};
+
+static const struct pruss_private_data am65x_j721e_pruss_data = {
+ .has_core_mux_clock = true,
+};
+
+static const struct of_device_id pruss_of_match[] = {
+ { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
+ { .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
+ { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,k2g-pruss" },
+ { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pruss_of_match);
+
+static struct platform_driver pruss_driver = {
+ .driver = {
+ .name = "pruss",
+ .of_match_table = pruss_of_match,
+ },
+ .probe = pruss_probe,
+ .remove = pruss_remove,
+};
+module_platform_driver(pruss_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("PRU-ICSS Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
new file mode 100644
index 0000000..5376f3d
--- /dev/null
+++ b/drivers/soc/ti/smartreflex.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/smartreflex.h>
+
+#define DRIVER_NAME "smartreflex"
+#define SMARTREFLEX_NAME_LEN 32
+#define NVALUE_NAME_LEN 40
+#define SR_DISABLE_TIMEOUT 200
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct dentry *sr_dbg_dir;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+ __raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+ u32 value)
+{
+ u32 reg_val;
+
+ /*
+ * The Smartreflex error config register is special as it contains
+ * certain status bits that are cleared by writing a 1 to them. So
+ * in order to make sure no accidental write of 1 happens to those
+ * status bits, clear them in the read value. This means this API
+ * doesn't rewrite values in these bits if they are currently set,
+ * but does allow the caller to write those bits.
+ */
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
+
+ reg_val |= value;
+
+ __raw_writel(reg_val, (sr->base + offset));
+}
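The masking above matters because the ERRCONFIG status bits are write-1-to-clear: folding them into the caller's mask zeroes them in the read-back value, so an unrelated read-modify-write cannot accidentally acknowledge a pending event. A worked sketch with hypothetical bit positions:

static u32 example_rmw(u32 reg_val)
{
	u32 mask  = BIT(3);		/* caller only changes bit 3 */
	u32 value = BIT(3);		/* ...and wants it set */

	mask |= BIT(31);		/* BIT(31): hypothetical W1C status */
	reg_val &= ~mask;		/* status bit zeroed in cached value */
	reg_val |= value & mask;	/* only bit 3 written back as 1 */
	return reg_val;			/* writing this can't ack BIT(31) */
}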
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+ return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr_info;
+
+ if (!voltdm) {
+ pr_err("%s: Null voltage domain passed!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(sr_info, &sr_list, node) {
+ if (voltdm == sr_info->voltdm)
+ return sr_info;
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+ struct omap_sr *sr_info = data;
+ u32 status = 0;
+
+ switch (sr_info->ip_type) {
+ case SR_TYPE_V1:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ break;
+ case SR_TYPE_V2:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, IRQSTATUS);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, IRQSTATUS, status);
+ break;
+ default:
+ dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+ sr_info->ip_type);
+ return IRQ_NONE;
+ }
+
+ if (sr_class->notify)
+ sr_class->notify(sr_info, status);
+
+ return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+ struct clk *fck;
+ u32 fclk_speed;
+
+ /* Try interconnect target module fck first if it already exists */
+ fck = clk_get(sr->pdev->dev.parent, "fck");
+ if (IS_ERR(fck)) {
+ fck = clk_get(&sr->pdev->dev, "fck");
+ if (IS_ERR(fck)) {
+ dev_err(&sr->pdev->dev,
+ "%s: unable to get fck for device %s\n",
+ __func__, dev_name(&sr->pdev->dev));
+ return;
+ }
+ }
+
+ fclk_speed = clk_get_rate(fck);
+ clk_put(fck);
+
+ switch (fclk_speed) {
+ case 12000000:
+ sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+ break;
+ case 13000000:
+ sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+ break;
+ case 19200000:
+ sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+ break;
+ case 26000000:
+ sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+ break;
+ case 38400000:
+ sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
+ __func__, fclk_speed);
+ break;
+ }
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (!sr_class->enable(sr))
+ sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (sr->autocomp_active) {
+ sr_class->disable(sr, 1);
+ sr->autocomp_active = false;
+ }
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only when both the sr device and the class driver registration
+ * have completed. It is attempted from both the sr class driver
+ * registration and the sr device initialization APIs; only one call
+ * will ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a particular SR
+ * if the smartreflex class driver is already registered, has
+ * requested interrupts and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+ struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
+ int ret = 0;
+
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
+ sr_interrupt, 0, sr_info->name, sr_info);
+ if (ret)
+ goto error;
+ disable_irq(sr_info->irq);
+ }
+
+ if (pdata && pdata->enable_on_init)
+ sr_start_vddautocomp(sr_info);
+
+ return ret;
+
+error:
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering interrupt handler. Smartreflex will not function as desired\n",
+ __func__);
+
+ return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+ errconf_val);
+
+ /*
+ * Wait for SR to be disabled:
+ * poll until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+ dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+ ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /*
+ * Disable all other SR interrupts and clear the status;
+ * write to the status register only on a need basis, i.e. only
+ * if the status is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ ERRCONFIG_VPBOUNDINTST_V2);
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
+ sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+ IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT));
+
+ /*
+ * Wait for SR to be disabled:
+ * poll until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+ dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
+{
+ int i;
+
+ if (!sr->nvalue_table) {
+ dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+ __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < sr->nvalue_count; i++) {
+ if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+ return &sr->nvalue_table[i];
+ }
+
+ return NULL;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the SmartReflex to perform AVS using the
+ * error generator module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module,
+ * i.e. the SR settings used when the ERROR module drives AVS.
+ * SR CLASS 3 by default uses only the ERROR module, whereas
+ * SR CLASS 2 can choose between the ERROR module and the MINMAXAVG
+ * module. Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_errgen(struct omap_sr *sr)
+{
+ u32 sr_config, sr_errconfig, errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+ (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+ (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
+ sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+ SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+ sr_errconfig);
+
+ /* Enabling the interrupts if the ERROR module is used */
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
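Taken together with sr_enable() below, a class-3 driver's .configure hook can
reduce to delegating here; a minimal illustrative sketch (the hook name is
invented for the example, not part of this patch):

/* Illustrative class-3 configure hook using the error generator */
static int my_class3_configure(struct omap_sr *sr)
{
	return sr_configure_errgen(sr);
}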
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct omap_sr *sr)
+{
+ u32 errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
+
+ /*
+ * Disable the interrupts of the ERROR module.
+ * NOTE: modify is a read-modify-write, so the implicit OCP barrier
+ * required here is present - sequencing is critical at this point
+ * (vpboundint must be disabled only after errgen is disabled).
+ */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the SmartReflex to perform AVS using the
+ * minmaxavg module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module,
+ * i.e. the SR settings used when the MINMAXAVG module drives AVS.
+ * SR CLASS 3 by default uses only the ERROR module, whereas
+ * SR CLASS 2 can choose between the ERROR module and the MINMAXAVG
+ * module. Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_minmax(struct omap_sr *sr)
+{
+ u32 sr_config, sr_avgwt;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE |
+ (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+ (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+ sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+ /*
+ * Enabling the interrupts if MINMAXAVG module is used.
+ * TODO: check if all the interrupts are mandatory
+ */
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN),
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+ ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, IRQSTATUS,
+ IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_SET,
+ IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
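A class-2 driver, by contrast, could point its .configure hook at this helper
and consume the MINMAXAVG interrupts through .notify; a minimal sketch under
that assumption (again, the hook name is illustrative):

/* Illustrative class-2 configure hook selecting the MINMAXAVG module */
static int my_class2_configure(struct omap_sr *sr)
{
	return sr_configure_minmax(sr);
}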
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @sr: pointer to the SR module to be enabled.
+ * @volt: the voltage at which the voltage domain associated with
+ * the smartreflex module is operating.
+ * This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success, or an error
+ * value if the voltage passed or the ntarget value is wrong.
+ */
+int sr_enable(struct omap_sr *sr, unsigned long volt)
+{
+ struct omap_volt_data *volt_data;
+ struct omap_sr_nvalue_table *nvalue_row;
+ int ret;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+ if (IS_ERR(volt_data)) {
+ dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table for nominal voltage %ld\n",
+ __func__, volt);
+ return PTR_ERR(volt_data);
+ }
+
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
+
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
+ return -ENODATA;
+ }
+
+ /* errminlimit is opp dependent and hence linked to voltage */
+ sr->err_minlimit = nvalue_row->errminlimit;
+
+ pm_runtime_get_sync(&sr->pdev->dev);
+
+ /* Check if SR is already enabled. If yes do nothing */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+ return 0;
+
+ /* Configure SR */
+ ret = sr_class->configure(sr);
+ if (ret)
+ return ret;
+
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
+
+ /* SRCONFIG - enable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+ return 0;
+}
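A class driver's .enable hook is then expected to look up the current
operating voltage of the SR's voltage domain and pass it in; a hedged sketch,
assuming the OMAP voltdm_get_voltage() helper is visible to the class driver:

/* Illustrative class enable hook: program Ntarget for the current OPP */
static int my_class_enable(struct omap_sr *sr)
{
	unsigned long volt = voltdm_get_voltage(sr->voltdm);

	return sr_enable(sr, volt);
}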
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @sr: pointer to the SR module to be disabled.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct omap_sr *sr)
+{
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return;
+ }
+
+ /* Check if SR clocks are already disabled. If yes do nothing */
+ if (pm_runtime_suspended(&sr->pdev->dev))
+ return;
+
+ /*
+ * Disable SR only if it is indeed enabled. Else just
+ * disable the clocks.
+ */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_v1_disable(sr);
+ break;
+ case SR_TYPE_V2:
+ sr_v2_disable(sr);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+ sr->ip_type);
+ }
+ }
+
+ pm_runtime_put_sync_suspend(&sr->pdev->dev);
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data: The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+ struct omap_sr *sr_info;
+
+ if (!class_data) {
+ pr_warn("%s:, Smartreflex class data passed is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sr_class) {
+ pr_warn("%s: Smartreflex class driver already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ sr_class = class_data;
+
+ /*
+ * Call into late init to do initializations that require
+ * both sr driver and sr class driver to be initialized.
+ */
+ list_for_each_entry(sr_info, &sr_list, node)
+ sr_late_init(sr_info);
+
+ return 0;
+}
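For reference, a class driver built from the illustrative hooks sketched above
would register itself roughly as follows (SR_CLASS3 and the omap_sr_class_data
fields come from include/linux/power/smartreflex.h; my_class_disable is an
assumed counterpart to the enable hook):

static struct omap_sr_class_data my_class_data = {
	.enable		= my_class_enable,
	.disable	= my_class_disable,
	.configure	= my_class3_configure,
	.class_type	= SR_CLASS3,
};

static int __init my_class_init(void)
{
	/* Triggers sr_late_init() for every SR probed so far */
	return sr_register_class(&my_class_data);
}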
+
+/**
+ * omap_sr_enable() - API to enable SR clocks and to call into the
+ * registered smartreflex class enable API.
+ * @voltdm: pointer to the voltage domain to which the SR module belongs.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->enable(sr);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ * processor voltage
+ * @voltdm: pointer to the voltage domain to which the SR module belongs.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API, telling it not
+ * to reset the VP voltage after disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 0);
+}
+
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ * voltage processor voltage
+ * @voltdm: pointer to the voltage domain to which the SR module belongs.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API, telling it to
+ * reset the VP voltage after disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 1);
+}
+
+/* PM Debug FS entries to enable and disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = sr_info->autocomp_active;
+
+ return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (val > 1) {
+ pr_warn("%s: Invalid argument %lld\n", __func__, val);
+ return -EINVAL;
+ }
+
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+ omap_sr_autocomp_store, "%llu\n");
+
+static int omap_sr_probe(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info;
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct resource *mem, *irq;
+ struct dentry *nvalue_dir;
+ int i, ret = 0;
+
+ sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL);
+ if (!sr_info)
+ return -ENOMEM;
+
+ sr_info->name = devm_kzalloc(&pdev->dev,
+ SMARTREFLEX_NAME_LEN, GFP_KERNEL);
+ if (!sr_info->name)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sr_info);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sr_info->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(sr_info->base)) {
+ dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
+ return PTR_ERR(sr_info->base);
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+
+ snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name);
+
+ sr_info->pdev = pdev;
+ sr_info->srid = pdev->id;
+ sr_info->voltdm = pdata->voltdm;
+ sr_info->nvalue_table = pdata->nvalue_table;
+ sr_info->nvalue_count = pdata->nvalue_count;
+ sr_info->senn_mod = pdata->senn_mod;
+ sr_info->senp_mod = pdata->senp_mod;
+ sr_info->err_weight = pdata->err_weight;
+ sr_info->err_maxlimit = pdata->err_maxlimit;
+ sr_info->accum_data = pdata->accum_data;
+ sr_info->senn_avgweight = pdata->senn_avgweight;
+ sr_info->senp_avgweight = pdata->senp_avgweight;
+ sr_info->autocomp_active = false;
+ sr_info->ip_type = pdata->ip_type;
+
+ if (irq)
+ sr_info->irq = irq->start;
+
+ sr_set_clk_length(sr_info);
+
+ list_add(&sr_info->node, &sr_list);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_list_del;
+ }
+
+ /*
+ * Call into late init to do initializations that require
+ * both sr driver and sr class driver to be initialized.
+ */
+ if (sr_class) {
+ ret = sr_late_init(sr_info);
+ if (ret) {
+ pr_warn("%s: Error in SR late init\n", __func__);
+ goto err_list_del;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+ if (!sr_dbg_dir)
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
+
+ debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
+ sr_info, &pm_sr_fops);
+ debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
+
+ nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
+
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
+ ret = -ENODATA;
+ goto err_debugfs;
+ }
+
+ for (i = 0; i < sr_info->nvalue_count; i++) {
+ char name[NVALUE_NAME_LEN + 1];
+
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
+
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+
+err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+err_list_del:
+ list_del(&sr_info->node);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static int omap_sr_remove(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return PTR_ERR(sr_info);
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+ debugfs_remove_recursive(sr_info->dbg_dir);
+
+ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+ return 0;
+}
+
+static void omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return;
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+
+ return;
+}
+
+static const struct of_device_id omap_sr_match[] = {
+ { .compatible = "ti,omap3-smartreflex-core", },
+ { .compatible = "ti,omap3-smartreflex-mpu-iva", },
+ { .compatible = "ti,omap4-smartreflex-core", },
+ { .compatible = "ti,omap4-smartreflex-mpu", },
+ { .compatible = "ti,omap4-smartreflex-iva", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_sr_match);
+
+static struct platform_driver smartreflex_driver = {
+ .probe = omap_sr_probe,
+ .remove = omap_sr_remove,
+ .shutdown = omap_sr_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap_sr_match,
+ },
+};
+
+static int __init sr_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&smartreflex_driver);
+ if (ret) {
+ pr_err("%s: platform driver register failed for SR\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(sr_init);
+
+static void __exit sr_exit(void)
+{
+ platform_driver_unregister(&smartreflex_driver);
+}
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP Smartreflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
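To show where the public omap_sr_* entry points above sit in the bigger
picture, here is a hedged sketch of how a voltage-scaling path would bracket a
nominal voltage change (the scale helper and its internals are hypothetical):

/* Illustrative voltage-scale path: AVS must be off across the transition */
static int my_voltdm_scale(struct voltagedomain *voltdm,
			   unsigned long target_volt)
{
	int ret;

	omap_sr_disable_reset_volt(voltdm);	/* stop AVS, reset VP voltage */

	ret = my_set_nominal_voltage(voltdm, target_volt);	/* assumed helper */

	omap_sr_enable(voltdm);			/* resume AVS at the new OPP */

	return ret;
}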
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8c2a2f2..8afb3f4 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -9,7 +9,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -18,150 +17,95 @@
#include <dt-bindings/soc/ti,sci_pm_domain.h>
/**
- * struct ti_sci_genpd_dev_data: holds data needed for every device attached
- * to this genpd
- * @idx: index of the device that identifies it with the system
- * control processor.
- * @exclusive: Permissions for exclusive request or shared request of the
- * device.
+ * struct ti_sci_genpd_provider: holds common TI SCI genpd provider data
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd_list: list of all the power domains on the device
+ * @data: onecell data for genpd core
*/
-struct ti_sci_genpd_dev_data {
- int idx;
- u8 exclusive;
+struct ti_sci_genpd_provider {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct list_head pd_list;
+ struct genpd_onecell_data data;
};
/**
* struct ti_sci_pm_domain: TI specific data needed for power domain
- * @ti_sci: handle to TI SCI protocol driver that provides ops to
- * communicate with system control processor.
- * @dev: pointer to dev for the driver for devm allocs
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ * @exclusive: Permissions for exclusive request or shared request of the
+ * device.
* @pd: generic_pm_domain for use with the genpd framework
+ * @node: link for the genpd list
+ * @parent: link to the parent TI SCI genpd provider
*/
struct ti_sci_pm_domain {
- const struct ti_sci_handle *ti_sci;
- struct device *dev;
+ int idx;
+ u8 exclusive;
struct generic_pm_domain pd;
+ struct list_head node;
+ struct ti_sci_genpd_provider *parent;
};
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
-/**
- * ti_sci_dev_id(): get prepopulated ti_sci id from struct dev
- * @dev: pointer to device associated with this genpd
- *
- * Returns device_id stored from ti,sci_id property
+/*
+ * ti_sci_pd_power_off(): genpd power down hook
+ * @domain: pointer to the powerdomain to power off
*/
-static int ti_sci_dev_id(struct device *dev)
+static int ti_sci_pd_power_off(struct generic_pm_domain *domain)
{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- return sci_dev_data->idx;
+ return ti_sci->ops.dev_ops.put_device(ti_sci, pd->idx);
}
-static u8 is_ti_sci_dev_exclusive(struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- return sci_dev_data->exclusive;
-}
-
-/**
- * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
- * @dev: pointer to device associated with this genpd
- *
- * Returns ti_sci_handle to be used to communicate with system
- * control processor.
+/*
+ * ti_sci_pd_power_on(): genpd power up hook
+ * @domain: pointer to the powerdomain to power on
*/
-static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev)
+static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
{
- struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- return ti_sci_genpd->ti_sci;
-}
-
-/**
- * ti_sci_dev_start(): genpd device start hook called to turn device on
- * @dev: pointer to device associated with this genpd to be powered on
- */
-static int ti_sci_dev_start(struct device *dev)
-{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
-
- if (is_ti_sci_dev_exclusive(dev))
- return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci, idx);
+ if (pd->exclusive)
+ return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci,
+ pd->idx);
else
- return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+ return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
-/**
- * ti_sci_dev_stop(): genpd device stop hook called to turn device off
- * @dev: pointer to device associated with this genpd to be powered off
+/*
+ * ti_sci_pd_xlate(): translation service for TI SCI genpds
+ * @genpdspec: DT identification data for the genpd
+ * @data: genpd core data for all the powerdomains on the device
*/
-static int ti_sci_dev_stop(struct device *dev)
+static struct generic_pm_domain *ti_sci_pd_xlate(
+ struct of_phandle_args *genpdspec,
+ void *data)
{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
- return ti_sci->ops.dev_ops.put_device(ti_sci, idx);
-}
+ if (genpdspec->args_count != 1 && genpdspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
-static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct of_phandle_args pd_args;
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain);
- const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci;
- struct ti_sci_genpd_dev_data *sci_dev_data;
- struct generic_pm_domain_data *genpd_data;
- int idx, ret = 0;
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
- ret = of_parse_phandle_with_args(np, "power-domains",
- "#power-domain-cells", 0, &pd_args);
- if (ret < 0)
- return ret;
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
- if (pd_args.args_count != 1 && pd_args.args_count != 2)
- return -EINVAL;
+ genpd_to_ti_sci_pd(genpd_data->domains[idx])->exclusive =
+ genpdspec->args[1];
- idx = pd_args.args[0];
-
- /*
- * Check the validity of the requested idx, if the index is not valid
- * the PMMC will return a NAK here and we will not allocate it.
- */
- ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx);
- if (ret)
- return -EINVAL;
-
- sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL);
- if (!sci_dev_data)
- return -ENOMEM;
-
- sci_dev_data->idx = idx;
- /* Enable the exclusive permissions by default */
- sci_dev_data->exclusive = TI_SCI_PD_EXCLUSIVE;
- if (pd_args.args_count == 2)
- sci_dev_data->exclusive = pd_args.args[1] & 0x1;
-
- genpd_data = dev_gpd_data(dev);
- genpd_data->data = sci_dev_data;
-
- return 0;
-}
-
-static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- kfree(sci_dev_data);
- genpd_data->data = NULL;
+ return genpd_data->domains[idx];
}
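The translation above implements the two-cell form of the binding: cell 0
selects the TI SCI device index, cell 1 the exclusivity flag. An illustrative
consumer node (device name and index invented for the example):

/*
 * power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
 *
 * TI_SCI_PD_EXCLUSIVE and TI_SCI_PD_SHARED come from
 * dt-bindings/soc/ti,sci_pm_domain.h and end up in genpdspec->args[1].
 */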
static const struct of_device_id ti_sci_pm_domain_matches[] = {
@@ -173,33 +117,80 @@
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct ti_sci_pm_domain *ti_sci_pd;
+ struct ti_sci_genpd_provider *pd_provider;
+ struct ti_sci_pm_domain *pd;
+ struct device_node *np = NULL;
+ struct of_phandle_args args;
int ret;
+ u32 max_id = 0;
+ int index;
- ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL);
- if (!ti_sci_pd)
+ pd_provider = devm_kzalloc(dev, sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
return -ENOMEM;
- ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev);
- if (IS_ERR(ti_sci_pd->ti_sci))
- return PTR_ERR(ti_sci_pd->ti_sci);
+ pd_provider->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(pd_provider->ti_sci))
+ return PTR_ERR(pd_provider->ti_sci);
- ti_sci_pd->dev = dev;
+ pd_provider->dev = dev;
- ti_sci_pd->pd.name = "ti_sci_pd";
+ INIT_LIST_HEAD(&pd_provider->pd_list);
- ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
- ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
+ /* Find highest device ID used for power domains */
+ while (1) {
+ np = of_find_node_with_property(np, "power-domains");
+ if (!np)
+ break;
- ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start;
- ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop;
+ index = 0;
- pm_genpd_init(&ti_sci_pd->pd, NULL, true);
+ while (1) {
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ index, &args);
+ if (ret)
+ break;
- ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd);
+ if (args.args_count >= 1 && args.np == dev->of_node) {
+ if (args.args[0] > max_id)
+ max_id = args.args[0];
- return ret;
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->pd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "pd:%d",
+ args.args[0]);
+ if (!pd->pd.name)
+ return -ENOMEM;
+
+ pd->pd.power_off = ti_sci_pd_power_off;
+ pd->pd.power_on = ti_sci_pd_power_on;
+ pd->idx = args.args[0];
+ pd->parent = pd_provider;
+
+ pm_genpd_init(&pd->pd, NULL, true);
+
+ list_add(&pd->node, &pd_provider->pd_list);
+ }
+ index++;
+ }
+ }
+
+ pd_provider->data.domains =
+ devm_kcalloc(dev, max_id + 1,
+ sizeof(*pd_provider->data.domains),
+ GFP_KERNEL);
+
+ pd_provider->data.num_domains = max_id + 1;
+ pd_provider->data.xlate = ti_sci_pd_xlate;
+
+ list_for_each_entry(pd, &pd_provider->pd_list, node)
+ pd_provider->data.domains[pd->idx] = &pd->pd;
+
+ return of_genpd_add_provider_onecell(dev->of_node, &pd_provider->data);
}
static struct platform_driver ti_sci_pm_domains_driver = {
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
index d64feeb..a9472e0 100644
--- a/drivers/soc/ux500/ux500-soc-id.c
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -146,9 +146,8 @@
return kasprintf(GFP_KERNEL, "%s", "Unknown");
}
-static ssize_t ux500_get_process(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+process_show(struct device *dev, struct device_attribute *attr, char *buf)
{
if (dbx500_id.process == 0x00)
return sprintf(buf, "Standard\n");
@@ -156,6 +155,15 @@
return sprintf(buf, "%02xnm\n", dbx500_id.process);
}
+static DEVICE_ATTR_RO(process);
+
+static struct attribute *ux500_soc_attrs[] = {
+ &dev_attr_process.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(ux500_soc);
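DEVICE_ATTR_RO() plus ATTRIBUTE_GROUPS() replace the open-coded
device_attribute and device_create_file() pair; as a rough sketch of what the
macro generates (see include/linux/sysfs.h):

/* Approximate expansion of ATTRIBUTE_GROUPS(ux500_soc) */
static const struct attribute_group ux500_soc_group = {
	.attrs = ux500_soc_attrs,
};

static const struct attribute_group *ux500_soc_groups[] = {
	&ux500_soc_group,
	NULL,
};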
+
static const char *db8500_read_soc_id(struct device_node *backupram)
{
void __iomem *base;
@@ -184,14 +192,11 @@
soc_dev_attr->machine = ux500_get_machine();
soc_dev_attr->family = ux500_get_family();
soc_dev_attr->revision = ux500_get_revision();
+ soc_dev_attr->custom_attr_group = ux500_soc_groups[0];
}
-static const struct device_attribute ux500_soc_attr =
- __ATTR(process, S_IRUGO, ux500_get_process, NULL);
-
static int __init ux500_soc_device_init(void)
{
- struct device *parent;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *backupram;
@@ -217,9 +222,6 @@
return PTR_ERR(soc_dev);
}
- parent = soc_device_to_device(soc_dev);
- device_create_file(parent, &ux500_soc_attr);
-
return 0;
}
subsys_initcall(ux500_soc_device_init);
diff --git a/drivers/soc/versatile/soc-integrator.c b/drivers/soc/versatile/soc-integrator.c
index ae13fa2..bab4ad8 100644
--- a/drivers/soc/versatile/soc-integrator.c
+++ b/drivers/soc/versatile/soc-integrator.c
@@ -56,49 +56,51 @@
}
}
-static ssize_t integrator_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", integrator_coreid >> 24);
}
-static struct device_attribute integrator_manf_attr =
- __ATTR(manufacturer, S_IRUGO, integrator_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t integrator_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+arch_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_arch_str(integrator_coreid));
}
-static struct device_attribute integrator_arch_attr =
- __ATTR(arch, S_IRUGO, integrator_get_arch, NULL);
+static DEVICE_ATTR_RO(arch);
-static ssize_t integrator_get_fpga(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", integrator_fpga_str(integrator_coreid));
}
-static struct device_attribute integrator_fpga_attr =
- __ATTR(fpga, S_IRUGO, integrator_get_fpga, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t integrator_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (integrator_coreid >> 4) & 0xFF);
}
-static struct device_attribute integrator_build_attr =
- __ATTR(build, S_IRUGO, integrator_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *integrator_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_arch.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(integrator);
static int __init integrator_soc_init(void)
{
- static struct regmap *syscon_regmap;
+ struct regmap *syscon_regmap;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
struct device_node *np;
@@ -127,6 +129,7 @@
soc_dev_attr->soc_id = "Integrator";
soc_dev_attr->machine = "Integrator";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = integrator_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -134,11 +137,6 @@
}
dev = soc_device_to_device(soc_dev);
- device_create_file(dev, &integrator_manf_attr);
- device_create_file(dev, &integrator_arch_attr);
- device_create_file(dev, &integrator_fpga_attr);
- device_create_file(dev, &integrator_build_attr);
-
dev_info(dev, "Detected ARM core module:\n");
dev_info(dev, " Manufacturer: %02x\n", (val >> 24));
dev_info(dev, " Architecture: %s\n", integrator_arch_str(val));
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index 9471353..c6876d2 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -39,45 +39,47 @@
}
}
-static ssize_t realview_get_manf(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+manufacturer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", realview_coreid >> 24);
}
-static struct device_attribute realview_manf_attr =
- __ATTR(manufacturer, S_IRUGO, realview_get_manf, NULL);
+static DEVICE_ATTR_RO(manufacturer);
-static ssize_t realview_get_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+board_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
}
-static struct device_attribute realview_board_attr =
- __ATTR(board, S_IRUGO, realview_get_board, NULL);
+static DEVICE_ATTR_RO(board);
-static ssize_t realview_get_arch(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+fpga_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", realview_arch_str(realview_coreid));
}
-static struct device_attribute realview_arch_attr =
- __ATTR(fpga, S_IRUGO, realview_get_arch, NULL);
+static DEVICE_ATTR_RO(fpga);
-static ssize_t realview_get_build(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+build_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%02x\n", (realview_coreid & 0xFF));
}
-static struct device_attribute realview_build_attr =
- __ATTR(build, S_IRUGO, realview_get_build, NULL);
+static DEVICE_ATTR_RO(build);
+
+static struct attribute *realview_attrs[] = {
+ &dev_attr_manufacturer.attr,
+ &dev_attr_board.attr,
+ &dev_attr_fpga.attr,
+ &dev_attr_build.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(realview);
static int realview_soc_probe(struct platform_device *pdev)
{
@@ -102,6 +104,7 @@
soc_dev_attr->machine = "RealView";
soc_dev_attr->family = "Versatile";
+ soc_dev_attr->custom_attr_group = realview_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
@@ -112,11 +115,6 @@
if (ret)
return -ENODEV;
- device_create_file(soc_device_to_device(soc_dev), &realview_manf_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_board_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
- device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
-
dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
realview_coreid,
((realview_coreid >> 16) & 0xfff));
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 3fa162c..646512d 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -21,11 +21,15 @@
bool "Enable Xilinx Zynq MPSoC Power Management driver"
depends on PM && ZYNQMP_FIRMWARE
default y
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
help
Say yes to enable power management support for ZynqMP SoC.
This driver uses firmware driver as an interface for power
management request to firmware. It registers isr to handle
- power management callbacks from firmware.
+ power management callbacks from firmware. It also registers a
+ mailbox client to handle these callbacks.
+
If in doubt, say N.
config ZYNQMP_PM_DOMAINS
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a840c02..a3aa409 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -511,7 +511,7 @@
return -ENODEV;
}
- xvcu->vcu_slcr_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->vcu_slcr_ba) {
dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
@@ -524,7 +524,7 @@
return -ENODEV;
}
- xvcu->logicore_reg_ba = devm_ioremap_nocache(&pdev->dev, res->start,
+ xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!xvcu->logicore_reg_ba) {
dev_err(&pdev->dev, "logicore register mapping failed.\n");
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 600f57c..226d343 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -2,7 +2,7 @@
/*
* ZynqMP Generic PM domain support
*
- * Copyright (C) 2015-2018 Xilinx, Inc.
+ * Copyright (C) 2015-2019 Xilinx, Inc.
*
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
@@ -23,7 +23,7 @@
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
-static const struct zynqmp_eemi_ops *eemi_ops;
+static int min_capability;
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
@@ -74,11 +74,8 @@
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
- ret = eemi_ops->set_requirement(pd->node_id,
+ ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
@@ -106,12 +103,9 @@
int ret;
struct pm_domain_data *pdd, *tmp;
struct zynqmp_pm_domain *pd;
- u32 capabilities = 0;
+ u32 capabilities = min_capability;
bool may_wakeup;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If domain is already released there is nothing to be done */
@@ -132,7 +126,7 @@
}
}
- ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
/**
* If powering down of any node inside this domain fails,
@@ -161,16 +155,13 @@
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->request_node)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
return 0;
- ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
/* If requesting a node fails print and return the error */
if (ret) {
@@ -197,16 +188,13 @@
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->release_node)
- return;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
- ret = eemi_ops->release_node(pd->node_id);
+ ret = zynqmp_pm_release_node(pd->node_id);
/* If releasing a node fails print the error and return */
if (ret) {
pr_err("%s() %s release failed for node %d: %d\n",
@@ -264,10 +252,6 @@
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
@@ -283,6 +267,10 @@
if (!domains)
return -ENOMEM;
+ if (!of_device_is_compatible(dev->parent->of_node,
+ "xlnx,zynqmp-firmware"))
+ min_capability = ZYNQMP_PM_CAPABILITY_UNUSABLE;
+
for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
pd->node_id = 0;
pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 1b9d144..c556623 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Power Management
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2019 Xilinx, Inc.
*
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
@@ -16,6 +16,20 @@
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+
+/**
+ * struct zynqmp_pm_work_struct - Wrapper for struct work_struct
+ * @callback_work: Work structure
+ * @args: Callback arguments
+ */
+struct zynqmp_pm_work_struct {
+ struct work_struct callback_work;
+ u32 args[CB_ARG_CNT];
+};
+
+static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
+static struct mbox_chan *rx_chan;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -31,7 +45,6 @@
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -68,6 +81,53 @@
return IRQ_HANDLED;
}
+static void ipi_receive_callback(struct mbox_client *cl, void *data)
+{
+ struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)data;
+ u32 payload[CB_PAYLOAD_SIZE];
+ int ret;
+
+ memcpy(payload, msg->data, sizeof(payload));
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq,
+ &zynqmp_pm_init_suspend_work->callback_work);
+
+ /* Send NULL message to mbox controller to ack the message */
+ ret = mbox_send_message(rx_chan, NULL);
+ if (ret)
+ pr_err("IPI ack failed. Error %d\n", ret);
+ }
+}
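For reference, the message handed to this callback uses the flat,
length-prefixed layout from include/linux/mailbox/zynqmp-ipi-message.h
(reproduced here as a sketch; the first payload word carries the callback
API ID):

struct zynqmp_ipi_message {
	size_t len;	/* length of data[] in bytes */
	u8 data[0];	/* payload; first u32 is the callback API ID */
};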
+
+/**
+ * zynqmp_pm_init_suspend_work_fn - Initialize suspend
+ * @work: Pointer to work_struct
+ *
+ * Bottom-half of PM callback IRQ handler.
+ */
+static void zynqmp_pm_init_suspend_work_fn(struct work_struct *work)
+{
+ struct zynqmp_pm_work_struct *pm_work =
+ container_of(work, struct zynqmp_pm_work_struct, callback_work);
+
+ if (pm_work->args[0] == SUSPEND_SYSTEM_SHUTDOWN) {
+ orderly_poweroff(true);
+ } else if (pm_work->args[0] == SUSPEND_POWER_REQUEST) {
+ pm_suspend(PM_SUSPEND_MEM);
+ } else {
+ pr_err("%s Unsupported InitSuspendCb reason code %d.\n",
+ __func__, pm_work->args[0]);
+ }
+}
+
static ssize_t suspend_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -94,9 +154,6 @@
{
int md, ret = -EINVAL;
- if (!eemi_ops->set_suspend_mode)
- return ret;
-
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md] &&
sysfs_streq(suspend_modes[md], buf)) {
@@ -105,7 +162,7 @@
}
if (!ret && md != suspend_mode) {
- ret = eemi_ops->set_suspend_mode(md);
+ ret = zynqmp_pm_set_suspend_mode(md);
if (likely(!ret))
suspend_mode = md;
}
@@ -119,32 +176,55 @@
{
int ret, irq;
u32 pm_api_version;
+ struct mbox_client *client;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
- if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
- return -ENXIO;
-
- eemi_ops->init_finalize();
- eemi_ops->get_api_version(&pm_api_version);
+ zynqmp_pm_init_finalize();
+ zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ zynqmp_pm_init_suspend_work =
+ devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work)
+ return -ENOMEM;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
- IRQF_NO_SUSPEND | IRQF_ONESHOT,
- dev_name(&pdev->dev), &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
- "with %d\n", irq, ret);
- return ret;
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+ client->rx_callback = ipi_receive_callback;
+
+ rx_chan = mbox_request_channel_byname(client, "rx");
+ if (IS_ERR(rx_chan)) {
+ dev_err(&pdev->dev, "Failed to request rx channel\n");
+ return PTR_ERR(rx_chan);
+ }
+ } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' "
+ "failed with %d\n", irq, ret);
+ return ret;
+ }
+ } else {
+ dev_err(&pdev->dev, "Required property not found in DT node\n");
+ return -ENOENT;
}
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
@@ -160,6 +240,9 @@
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (rx_chan)
+ mbox_free_channel(rx_chan);
+
return 0;
}