Diffstat (limited to 'plat/imx')
-rw-r--r--  plat/imx/common/aarch32/imx_uart_console.S | 2
-rw-r--r--  plat/imx/common/imx8_helpers.S | 47
-rw-r--r--  plat/imx/common/imx_bl31_common.c | 23
-rw-r--r--  plat/imx/common/imx_io_storage.c (renamed from plat/imx/imx8m/imx8mm/imx8mm_io_storage.c) | 25
-rw-r--r--  plat/imx/common/imx_sip_handler.c | 94
-rw-r--r--  plat/imx/common/imx_sip_svc.c | 36
-rw-r--r--  plat/imx/common/imx_uart_console.S | 15
-rw-r--r--  plat/imx/common/include/imx_plat_common.h | 16
-rw-r--r--  plat/imx/common/include/imx_sip_svc.h | 49
-rw-r--r--  plat/imx/common/include/sci/sci_rpc.h | 2
-rw-r--r--  plat/imx/common/include/sci/svc/pad/sci_pad_api.h | 4
-rw-r--r--  plat/imx/common/include/sci/svc/pm/sci_pm_api.h | 2
-rw-r--r--  plat/imx/common/lpuart_console.S | 2
-rw-r--r--  plat/imx/common/sci/svc/pm/pm_rpc_clnt.c | 2
-rw-r--r--  plat/imx/common/sci/svc/rm/rm_rpc_clnt.c | 2
-rw-r--r--  plat/imx/imx7/common/imx7.mk | 15
-rw-r--r--  plat/imx/imx7/common/imx7_bl2_el3_common.c | 4
-rw-r--r--  plat/imx/imx7/common/imx7_io_storage.c | 270
-rw-r--r--  plat/imx/imx7/include/imx7_def.h | 2
-rw-r--r--  plat/imx/imx7/picopi/include/platform_def.h | 8
-rw-r--r--  plat/imx/imx7/picopi/picopi_bl2_el3_setup.c | 9
-rw-r--r--  plat/imx/imx7/picopi/platform.mk | 5
-rw-r--r--  plat/imx/imx7/warp7/include/platform_def.h | 8
-rw-r--r--  plat/imx/imx7/warp7/platform.mk | 4
-rw-r--r--  plat/imx/imx7/warp7/warp7_bl2_el3_setup.c | 9
-rw-r--r--  plat/imx/imx8m/ddr/clock.c | 150
-rw-r--r--  plat/imx/imx8m/ddr/ddr4_dvfs.c | 263
-rw-r--r--  plat/imx/imx8m/ddr/dram.c | 388
-rw-r--r--  plat/imx/imx8m/ddr/dram_retention.c | 224
-rw-r--r--  plat/imx/imx8m/ddr/lpddr4_dvfs.c | 295
-rw-r--r--  plat/imx/imx8m/gpc_common.c | 62
-rw-r--r--  plat/imx/imx8m/imx8m_caam.c | 12
-rw-r--r--  plat/imx/imx8m/imx8m_ccm.c | 58
-rw-r--r--  plat/imx/imx8m/imx8m_csu.c | 56
-rw-r--r--  plat/imx/imx8m/imx8m_dyn_cfg_helpers.c | 201
-rw-r--r--  plat/imx/imx8m/imx8m_image_load.c (renamed from plat/imx/imx8m/imx8mm/imx8mm_image_load.c) | 0
-rw-r--r--  plat/imx/imx8m/imx8m_measured_boot.c | 88
-rw-r--r--  plat/imx/imx8m/imx8m_psci_common.c | 25
-rw-r--r--  plat/imx/imx8m/imx8m_snvs.c | 19
-rw-r--r--  plat/imx/imx8m/imx8mm/gpc.c | 294
-rw-r--r--  plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c | 2
-rw-r--r--  plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c | 131
-rw-r--r--  plat/imx/imx8m/imx8mm/include/imx8mm_private.h | 2
-rw-r--r--  plat/imx/imx8m/imx8mm/include/imx_sec_def.h | 238
-rw-r--r--  plat/imx/imx8m/imx8mm/include/platform_def.h | 45
-rw-r--r--  plat/imx/imx8m/imx8mm/platform.mk | 72
-rw-r--r--  plat/imx/imx8m/imx8mn/gpc.c | 127
-rw-r--r--  plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c | 115
-rw-r--r--  plat/imx/imx8m/imx8mn/include/imx_sec_def.h | 229
-rw-r--r--  plat/imx/imx8m/imx8mn/include/platform_def.h | 34
-rw-r--r--  plat/imx/imx8m/imx8mn/platform.mk | 35
-rw-r--r--  plat/imx/imx8m/imx8mp/gpc.c | 30
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c | 117
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c | 94
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c | 139
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_rotpk.S | 15
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c | 36
-rw-r--r--  plat/imx/imx8m/imx8mp/include/imx8mp_private.h | 15
-rw-r--r--  plat/imx/imx8m/imx8mp/include/imx_sec_def.h | 309
-rw-r--r--  plat/imx/imx8m/imx8mp/include/platform_def.h | 54
-rw-r--r--  plat/imx/imx8m/imx8mp/platform.mk | 128
-rw-r--r--  plat/imx/imx8m/imx8mq/gpc.c | 301
-rw-r--r--  plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c | 93
-rw-r--r--  plat/imx/imx8m/imx8mq/imx8mq_psci.c | 31
-rw-r--r--  plat/imx/imx8m/imx8mq/include/imx_sec_def.h | 249
-rw-r--r--  plat/imx/imx8m/imx8mq/include/platform_def.h | 29
-rw-r--r--  plat/imx/imx8m/imx8mq/platform.mk | 28
-rw-r--r--  plat/imx/imx8m/imx_hab.c | 124
-rw-r--r--  plat/imx/imx8m/include/ddrc.h | 336
-rw-r--r--  plat/imx/imx8m/include/dram.h | 87
-rw-r--r--  plat/imx/imx8m/include/gpc.h | 15
-rw-r--r--  plat/imx/imx8m/include/imx8m_ccm.h | 12
-rw-r--r--  plat/imx/imx8m/include/imx8m_csu.h | 77
-rw-r--r--  plat/imx/imx8m/include/imx8m_measured_boot.h | 16
-rw-r--r--  plat/imx/imx8m/include/imx8m_snvs.h | 12
-rw-r--r--  plat/imx/imx8m/include/imx_rdc.h | 3
-rw-r--r--  plat/imx/imx8qm/imx8qm_bl31_setup.c | 30
-rw-r--r--  plat/imx/imx8qm/imx8qm_psci.c | 2
-rw-r--r--  plat/imx/imx8qm/platform.mk | 8
-rw-r--r--  plat/imx/imx8qx/imx8qx_bl31_setup.c | 38
-rw-r--r--  plat/imx/imx8qx/imx8qx_psci.c | 2
-rw-r--r--  plat/imx/imx8qx/include/platform_def.h | 2
-rw-r--r--  plat/imx/imx8qx/platform.mk | 8
-rw-r--r--  plat/imx/imx8ulp/apd_context.c | 657
-rw-r--r--  plat/imx/imx8ulp/dram.c | 798
-rw-r--r--  plat/imx/imx8ulp/imx8ulp_bl31_setup.c | 186
-rw-r--r--  plat/imx/imx8ulp/imx8ulp_caam.c | 18
-rw-r--r--  plat/imx/imx8ulp/imx8ulp_psci.c | 555
-rw-r--r--  plat/imx/imx8ulp/include/dram.h | 13
-rw-r--r--  plat/imx/imx8ulp/include/imx8ulp_caam.h | 24
-rw-r--r--  plat/imx/imx8ulp/include/platform_def.h | 124
-rw-r--r--  plat/imx/imx8ulp/include/scmi.h | 100
-rw-r--r--  plat/imx/imx8ulp/include/scmi_sensor.h | 139
-rw-r--r--  plat/imx/imx8ulp/include/xrdc.h | 47
-rw-r--r--  plat/imx/imx8ulp/platform.mk | 69
-rw-r--r--  plat/imx/imx8ulp/scmi/scmi.c | 69
-rw-r--r--  plat/imx/imx8ulp/scmi/scmi_pd.c | 371
-rw-r--r--  plat/imx/imx8ulp/scmi/scmi_sensor.c | 85
-rw-r--r--  plat/imx/imx8ulp/upower/upmu.h | 279
-rw-r--r--  plat/imx/imx8ulp/upower/upower_api.c | 3095
-rw-r--r--  plat/imx/imx8ulp/upower/upower_api.h | 1629
-rw-r--r--  plat/imx/imx8ulp/upower/upower_defs.h | 742
-rw-r--r--  plat/imx/imx8ulp/upower/upower_hal.c | 201
-rw-r--r--  plat/imx/imx8ulp/upower/upower_soc_defs.h | 1154
-rw-r--r--  plat/imx/imx8ulp/xrdc/xrdc_config.h | 136
-rw-r--r--  plat/imx/imx8ulp/xrdc/xrdc_core.c | 327
-rw-r--r--  plat/imx/imx93/aarch64/plat_helpers.S | 74
-rw-r--r--  plat/imx/imx93/imx93_bl31_setup.c | 160
-rw-r--r--  plat/imx/imx93/imx93_psci.c | 253
-rw-r--r--  plat/imx/imx93/include/platform_def.h | 97
-rw-r--r--  plat/imx/imx93/include/pwr_ctrl.h | 216
-rw-r--r--  plat/imx/imx93/plat_topology.c | 39
-rw-r--r--  plat/imx/imx93/platform.mk | 46
-rw-r--r--  plat/imx/imx93/pwr_ctrl.c | 63
-rw-r--r--  plat/imx/imx93/trdc.c | 68
-rw-r--r--  plat/imx/imx93/trdc_config.h | 254
116 files changed, 17801 insertions, 547 deletions
diff --git a/plat/imx/common/aarch32/imx_uart_console.S b/plat/imx/common/aarch32/imx_uart_console.S
index 1a1229aabf..2a35b5edf4 100644
--- a/plat/imx/common/aarch32/imx_uart_console.S
+++ b/plat/imx/common/aarch32/imx_uart_console.S
@@ -28,7 +28,7 @@ func console_imx_uart_register
mov r0, r4
pop {r4, lr}
- finish_console_register imx_uart putc=1, getc=1, flush=1
+ finish_console_register imx_uart putc=1, getc=ENABLE_CONSOLE_GETC, flush=1
register_fail:
pop {r4, pc}
diff --git a/plat/imx/common/imx8_helpers.S b/plat/imx/common/imx8_helpers.S
index 19293bfe77..eb9383368d 100644
--- a/plat/imx/common/imx8_helpers.S
+++ b/plat/imx/common/imx8_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -86,6 +86,51 @@ func plat_calc_core_pos
ret
endfunc plat_calc_core_pos
+ /* ----------------------------------------------
+ * function to handle platform specific reset.
+ * ----------------------------------------------
+ */
+func plat_reset_handler
+#if defined(PLAT_imx8ulp)
+ /* enable the 512KB cache by default */
+ mov x0, #IMX_SIM1_BASE
+ /*
+ * if the RVBADDR is ROM entry, that means we did
+ * NOT switch the L2 cache to 512KB. default is 256K config,
+ * so skip
+ */
+ ldr w1, [x0, #0x5c]
+ cmp w1, #0x1000
+ b.eq 1f
+ add x0, x0, #0x30
+ ldr w1, [x0]
+ /* if already 512KB config, skip */
+ tbnz w1, #4, 1f
+ ldr w1, [x0]
+ orr w1, w1, #0x10
+ str w1, [x0]
+ orr w1, w1, #0x10000
+ str w1, [x0]
+ b .
+1: mrs x0, CORTEX_A35_CPUECTLR_EL1
+ orr x0, x0, #(0x1 << 0)
+ orr x0, x0, #(0x1 << 3)
+ msr CORTEX_A35_CPUECTLR_EL1, x0
+
+ mrs x0, CORTEX_A35_L2ECTLR_EL1
+ orr x0, x0, #(0x1 << 0)
+ msr CORTEX_A35_L2ECTLR_EL1, x0
+ isb
+#endif
+ /* enable EL2 cpuectlr RW access */
+ mov x0, #0x73
+ msr actlr_el3, x0
+ msr actlr_el2, x0
+ isb
+
+ ret
+endfunc plat_reset_handler
+
/* ---------------------------------------------
* function to get the entrypoint.
* ---------------------------------------------
diff --git a/plat/imx/common/imx_bl31_common.c b/plat/imx/common/imx_bl31_common.c
new file mode 100644
index 0000000000..f6d7e24871
--- /dev/null
+++ b/plat/imx/common/imx_bl31_common.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2023-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <imx_plat_common.h>
+
+uint32_t plat_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned long mode;
+ uint32_t spsr;
+
+ /* figure out what mode we enter the non-secure world */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_io_storage.c b/plat/imx/common/imx_io_storage.c
index ff6687e134..bb3566297f 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_io_storage.c
+++ b/plat/imx/common/imx_io_storage.c
@@ -6,10 +6,10 @@
#include <assert.h>
+#include <common/debug.h>
#include <drivers/io/io_block.h>
#include <drivers/io/io_driver.h>
#include <drivers/io/io_fip.h>
-#include <drivers/io/io_driver.h>
#include <drivers/io/io_memmap.h>
#include <drivers/mmc.h>
#include <lib/utils_def.h>
@@ -21,21 +21,21 @@
static const io_dev_connector_t *fip_dev_con;
static uintptr_t fip_dev_handle;
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
static const io_dev_connector_t *mmc_dev_con;
static uintptr_t mmc_dev_handle;
static const io_block_spec_t mmc_fip_spec = {
- .offset = IMX8MM_FIP_MMC_BASE,
- .length = IMX8MM_FIP_SIZE
+ .offset = IMX_FIP_MMC_BASE,
+ .length = IMX_FIP_SIZE
};
static const io_block_dev_spec_t mmc_dev_spec = {
/* It's used as temp buffer in block driver. */
.buffer = {
- .offset = IMX8MM_FIP_BASE,
+ .offset = IMX_FIP_BASE,
/* do we need a new value? */
- .length = IMX8MM_FIP_SIZE
+ .length = IMX_FIP_SIZE
},
.ops = {
.read = mmc_read_blocks,
@@ -51,8 +51,8 @@ static const io_dev_connector_t *memmap_dev_con;
static uintptr_t memmap_dev_handle;
static const io_block_spec_t fip_block_spec = {
- .offset = IMX8MM_FIP_BASE,
- .length = IMX8MM_FIP_SIZE
+ .offset = IMX_FIP_BASE,
+ .length = IMX_FIP_SIZE
};
static int open_memmap(const uintptr_t spec);
#endif
@@ -113,6 +113,7 @@ static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
};
#endif /* TRUSTED_BOARD_BOOT */
+/* TODO: this structure is replicated multiple times. rationalize it ! */
struct plat_io_policy {
uintptr_t *dev_handle;
uintptr_t image_spec;
@@ -120,7 +121,7 @@ struct plat_io_policy {
};
static const struct plat_io_policy policies[] = {
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
[FIP_IMAGE_ID] = {
&mmc_dev_handle,
(uintptr_t)&mmc_fip_spec,
@@ -219,7 +220,7 @@ static int open_fip(const uintptr_t spec)
return result;
}
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
static int open_mmc(const uintptr_t spec)
{
int result;
@@ -270,11 +271,11 @@ int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
return result;
}
-void plat_imx8mm_io_setup(void)
+void plat_imx_io_setup(void)
{
int result __unused;
-#ifndef IMX8MM_FIP_MMAP
+#ifndef IMX_FIP_MMAP
result = register_io_dev_block(&mmc_dev_con);
assert(result == 0);
diff --git a/plat/imx/common/imx_sip_handler.c b/plat/imx/common/imx_sip_handler.c
index d4b3425af9..5d2918686b 100644
--- a/plat/imx/common/imx_sip_handler.c
+++ b/plat/imx/common/imx_sip_handler.c
@@ -17,10 +17,19 @@
#include <lib/mmio.h>
#include <sci/sci.h>
+#if defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+/*
+ * Defined in
+ * table 11. ROM event log buffer address location
+ * AN12853 "i.MX ROMs Log Events"
+ */
+#define ROM_LOG_BUFFER_ADDR 0x9E0
+#endif
+
#if defined(PLAT_imx8qm) || defined(PLAT_imx8qx)
#ifdef PLAT_imx8qm
-const static int ap_cluster_index[PLATFORM_CLUSTER_COUNT] = {
+static const int ap_cluster_index[PLATFORM_CLUSTER_COUNT] = {
SC_R_A53, SC_R_A72,
};
#endif
@@ -177,6 +186,76 @@ int imx_src_handler(uint32_t smc_fid,
}
#endif /* defined(PLAT_imx8mm) || defined(PLAT_imx8mq) */
+#if defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+static bool is_secondary_boot(void)
+{
+ uint32_t *rom_log_addr = (uint32_t *)ROM_LOG_BUFFER_ADDR;
+ bool is_secondary = false;
+ uint32_t *rom_log;
+ uint8_t event_id;
+
+ /* If the ROM event log pointer is not valid. */
+ if (*rom_log_addr < 0x900000 || *rom_log_addr >= 0xB00000 ||
+ *rom_log_addr & 0x3) {
+ return false;
+ }
+
+ /* Parse the ROM event ID version 2 log */
+ rom_log = (uint32_t *)(uintptr_t)(*rom_log_addr);
+ for (size_t i = 0; i < 128; i++) {
+ event_id = rom_log[i] >> 24;
+ switch (event_id) {
+ case 0x00: /* End of list */
+ return is_secondary;
+ /* Log entries with 1 parameter, skip 1 */
+ case 0x80: /* Perform the device initialization */
+ case 0x81: /* The boot device initialization completes */
+ case 0x82: /* Execute boot device driver pre-config */
+ case 0x8F: /* The boot device initialization fails */
+ case 0x90: /* Start to read data from boot device */
+ case 0x91: /* Reading data from boot device completes */
+ case 0x9F: /* Reading data from boot device fails */
+ i += 1;
+ continue;
+ /* Log entries with 2 parameters, skip 2 */
+ case 0xA0: /* Image authentication result */
+ case 0xC0: /* Jump to the boot image soon */
+ i += 2;
+ continue;
+ /* Booted the primary boot image */
+ case 0x50:
+ is_secondary = false;
+ continue;
+ /* Booted the secondary boot image */
+ case 0x51:
+ is_secondary = true;
+ continue;
+ }
+ }
+
+ return is_secondary;
+}
+
+int imx_src_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ void *handle)
+{
+ switch (x1) {
+ case IMX_SIP_SRC_SET_SECONDARY_BOOT:
+ /* we do support that on these SoCs */
+ break;
+ case IMX_SIP_SRC_IS_SECONDARY_BOOT:
+ return is_secondary_boot();
+ default:
+ return SMC_UNK;
+ };
+
+ return 0;
+}
+#endif /* defined(PLAT_imx8mn) || defined(PLAT_imx8mp) */
+
static uint64_t imx_get_commit_hash(u_register_t x2,
u_register_t x3,
u_register_t x4)
@@ -253,3 +332,16 @@ int imx_kernel_entry_handler(uint32_t smc_fid,
return 0;
}
+
+#if defined(PLAT_imx8ulp)
+int imx_hifi_xrdc(uint32_t smc_fid)
+{
+ mmio_setbits_32(IMX_SIM2_BASE + 0x8, BIT_32(19) | BIT_32(17) | BIT_32(18));
+ mmio_clrbits_32(IMX_SIM2_BASE + 0x8, BIT_32(16));
+
+ extern int xrdc_apply_hifi_config(void);
+ xrdc_apply_hifi_config();
+
+ return 0;
+}
+#endif
diff --git a/plat/imx/common/imx_sip_svc.c b/plat/imx/common/imx_sip_svc.c
index fd54820cfe..c625704e6c 100644
--- a/plat/imx/common/imx_sip_svc.c
+++ b/plat/imx/common/imx_sip_svc.c
@@ -1,14 +1,17 @@
/*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
+
#include <common/debug.h>
#include <common/runtime_svc.h>
+#include <drivers/scmi-msg.h>
#include <lib/pmf/pmf.h>
#include <tools_share/uuid.h>
+
#include <imx_sip_svc.h>
static int32_t imx_sip_setup(void)
@@ -29,10 +32,33 @@ static uintptr_t imx_sip_handler(unsigned int smc_fid,
case IMX_SIP_AARCH32:
SMC_RET1(handle, imx_kernel_entry_handler(smc_fid, x1, x2, x3, x4));
break;
+#if defined(PLAT_imx8ulp)
+ case IMX_SIP_SCMI:
+ scmi_smt_fastcall_smc_entry(0);
+ SMC_RET1(handle, 0);
+ break;
+ case IMX_SIP_HIFI_XRDC:
+ SMC_RET1(handle, imx_hifi_xrdc(smc_fid));
+ break;
+ case IMX_SIP_DDR_DVFS:
+ return dram_dvfs_handler(smc_fid, handle, x1, x2, x3);
+#endif
#if defined(PLAT_imx8mq)
case IMX_SIP_GET_SOC_INFO:
SMC_RET1(handle, imx_soc_info_handler(smc_fid, x1, x2, x3));
break;
+ case IMX_SIP_GPC:
+ SMC_RET1(handle, imx_gpc_handler(smc_fid, x1, x2, x3));
+ break;
+ case IMX_SIP_DDR_DVFS:
+ return dram_dvfs_handler(smc_fid, handle, x1, x2, x3);
+#endif
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+ case IMX_SIP_DDR_DVFS:
+ return dram_dvfs_handler(smc_fid, handle, x1, x2, x3);
+ case IMX_SIP_GPC:
+ SMC_RET1(handle, imx_gpc_handler(smc_fid, x1, x2, x3));
+ break;
#endif
#if (defined(PLAT_imx8qm) || defined(PLAT_imx8qx))
case IMX_SIP_SRTC:
@@ -48,11 +74,17 @@ static uintptr_t imx_sip_handler(unsigned int smc_fid,
case IMX_SIP_MISC_SET_TEMP:
SMC_RET1(handle, imx_misc_set_temp_handler(smc_fid, x1, x2, x3, x4));
#endif
-#if defined(PLAT_imx8mm) || defined(PLAT_imx8mq)
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mq) || defined(PLAT_imx8mn) || \
+ defined(PLAT_imx8mp)
case IMX_SIP_SRC:
SMC_RET1(handle, imx_src_handler(smc_fid, x1, x2, x3, handle));
break;
#endif
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+ case IMX_SIP_HAB:
+ SMC_RET1(handle, imx_hab_handler(smc_fid, x1, x2, x3, x4));
+ break;
+#endif
case IMX_SIP_BUILDINFO:
SMC_RET1(handle, imx_buildinfo_handler(smc_fid, x1, x2, x3, x4));
default:
diff --git a/plat/imx/common/imx_uart_console.S b/plat/imx/common/imx_uart_console.S
index ceeb3a76c8..560db15b58 100644
--- a/plat/imx/common/imx_uart_console.S
+++ b/plat/imx/common/imx_uart_console.S
@@ -12,6 +12,7 @@
#define URXD 0x0 /* Receiver Register */
#define UTXD 0x40 /* Transmitter Register */
+#define USR2 0x98 /* UART Status Register 2 */
#define UTS 0xb4 /* UART Test Register (mx31) */
#define URXD_RX_DATA (0xFF)
@@ -32,7 +33,7 @@ func console_imx_uart_register
mov x0, x6
mov x30, x7
- finish_console_register imx_uart putc=1, getc=1, flush=1
+ finish_console_register imx_uart putc=1, getc=ENABLE_CONSOLE_GETC, flush=1
register_fail:
ret x7
@@ -53,13 +54,13 @@ func console_imx_uart_putc
1:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UTS]
- tbz w2, #6, 1b
+ tbnz w2, #4, 1b
mov w2, #0xD
str w2, [x1, #UTXD]
2:
/* Check if the transmit FIFO is full */
ldr w2, [x1, #UTS]
- tbz w2, #6, 2b
+ tbnz w2, #4, 2b
str w0, [x1, #UTXD]
ret
putc_error:
@@ -84,5 +85,13 @@ getc_error:
endfunc console_imx_uart_getc
func console_imx_uart_flush
+ ldr x0, [x0, #CONSOLE_T_BASE]
+ cbz x0, flush_exit
+1:
+ /* Wait for the transmit complete bit */
+ ldr w1, [x0, #USR2]
+ tbz w1, #3, 1b
+
+flush_exit:
ret
endfunc console_imx_uart_flush
diff --git a/plat/imx/common/include/imx_plat_common.h b/plat/imx/common/include/imx_plat_common.h
new file mode 100644
index 0000000000..8ec9481db0
--- /dev/null
+++ b/plat/imx/common/include/imx_plat_common.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2023-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_PLAT_COMMON_H
+#define IMX_PLAT_COMMON_H
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+
+uint32_t plat_get_spsr_for_bl33_entry(void);
+
+#endif /*IMX_PLAT_COMMON_H */
diff --git a/plat/imx/common/include/imx_sip_svc.h b/plat/imx/common/include/imx_sip_svc.h
index 6c7a760c64..e154530e5b 100644
--- a/plat/imx/common/include/imx_sip_svc.h
+++ b/plat/imx/common/include/imx_sip_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2024, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,8 @@
#define __IMX_SIP_SVC_H__
/* SMC function IDs for SiP Service queries */
+#define IMX_SIP_GPC 0xC2000000
+
#define IMX_SIP_CPUFREQ 0xC2000001
#define IMX_SIP_SET_CPUFREQ 0x00
@@ -17,12 +19,25 @@
#define IMX_SIP_BUILDINFO 0xC2000003
#define IMX_SIP_BUILDINFO_GET_COMMITHASH 0x00
+#define IMX_SIP_DDR_DVFS 0xc2000004
+
#define IMX_SIP_SRC 0xC2000005
#define IMX_SIP_SRC_SET_SECONDARY_BOOT 0x10
#define IMX_SIP_SRC_IS_SECONDARY_BOOT 0x11
#define IMX_SIP_GET_SOC_INFO 0xC2000006
+#define IMX_SIP_HAB 0xC2000007
+#define IMX_SIP_HAB_AUTH_IMG 0x00
+#define IMX_SIP_HAB_ENTRY 0x01
+#define IMX_SIP_HAB_EXIT 0x02
+#define IMX_SIP_HAB_REPORT_EVENT 0x03
+#define IMX_SIP_HAB_REPORT_STATUS 0x04
+#define IMX_SIP_HAB_FAILSAFE 0x05
+#define IMX_SIP_HAB_CHECK_TARGET 0x06
+#define IMX_SIP_HAB_GET_VERSION 0x07
+#define IMX_SIP_HAB_AUTH_IMG_NO_DCD 0x08
+
#define IMX_SIP_WAKEUP_SRC 0xC2000009
#define IMX_SIP_WAKEUP_SRC_SCU 0x1
#define IMX_SIP_WAKEUP_SRC_IRQSTEER 0x2
@@ -37,16 +52,39 @@
int imx_kernel_entry_handler(uint32_t smc_fid, u_register_t x1,
u_register_t x2, u_register_t x3,
u_register_t x4);
+
+#define IMX_SIP_SCMI 0xC20000FE
+
+#define IMX_SIP_HIFI_XRDC 0xC200000E
+
#if defined(PLAT_imx8mq)
int imx_soc_info_handler(uint32_t smc_fid, u_register_t x1,
u_register_t x2, u_register_t x3);
+int imx_gpc_handler(uint32_t smc_fid, u_register_t x1,
+ u_register_t x2, u_register_t x3);
+int dram_dvfs_handler(uint32_t smc_fid, void *handle,
+ u_register_t x1, u_register_t x2, u_register_t x3);
#endif
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+int dram_dvfs_handler(uint32_t smc_fid, void *handle,
+ u_register_t x1, u_register_t x2, u_register_t x3);
+
+int imx_gpc_handler(uint32_t smc_fid, u_register_t x1,
+ u_register_t x2, u_register_t x3);
+#endif
+
-#if defined(PLAT_imx8mm) || defined(PLAT_imx8mq)
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mq) || defined(PLAT_imx8mn) || \
+ defined(PLAT_imx8mp)
int imx_src_handler(uint32_t smc_fid, u_register_t x1,
u_register_t x2, u_register_t x3, void *handle);
#endif
+#if defined(PLAT_imx8mm) || defined(PLAT_imx8mn) || defined(PLAT_imx8mp)
+int imx_hab_handler(uint32_t smc_fid, u_register_t x1,
+ u_register_t x2, u_register_t x3, u_register_t x4);
+#endif
+
#if (defined(PLAT_imx8qm) || defined(PLAT_imx8qx))
int imx_cpufreq_handler(uint32_t smc_fid, u_register_t x1,
u_register_t x2, u_register_t x3);
@@ -63,5 +101,12 @@ int imx_misc_set_temp_handler(uint32_t smc_fid, u_register_t x1,
uint64_t imx_buildinfo_handler(uint32_t smc_fid, u_register_t x1,
u_register_t x2, u_register_t x3,
u_register_t x4);
+int scmi_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2, u_register_t x3);
+int imx_hifi_xrdc(uint32_t smc_fid);
+
+#if defined(PLAT_imx8ulp)
+int dram_dvfs_handler(uint32_t smc_fid, void *handle,
+ u_register_t x1, u_register_t x2, u_register_t x3);
+#endif
#endif /* __IMX_SIP_SVC_H__ */
diff --git a/plat/imx/common/include/sci/sci_rpc.h b/plat/imx/common/include/sci/sci_rpc.h
index 60dbc27b6b..b6adf33081 100644
--- a/plat/imx/common/include/sci/sci_rpc.h
+++ b/plat/imx/common/include/sci/sci_rpc.h
@@ -100,7 +100,7 @@ typedef struct sc_rpc_async_msg_s {
void sc_call_rpc(sc_ipc_t ipc, sc_rpc_msg_t *msg, bool no_resp);
/*!
- * This is an internal function to dispath an RPC call that has
+ * This is an internal function to dispatch an RPC call that has
* arrived via IPC over an MU. It is called by server-side SCFW.
*
* @param[in] mu MU message arrived on
diff --git a/plat/imx/common/include/sci/svc/pad/sci_pad_api.h b/plat/imx/common/include/sci/svc/pad/sci_pad_api.h
index dc23eedb38..ac93aae3fd 100644
--- a/plat/imx/common/include/sci/svc/pad/sci_pad_api.h
+++ b/plat/imx/common/include/sci/svc/pad/sci_pad_api.h
@@ -42,7 +42,7 @@
*
* Pads are managed as a resource by the Resource Manager (RM). They have
* assigned owners and only the owners can configure the pads. Some of the
- * pads are reserved for use by the SCFW itself and this can be overriden
+ * pads are reserved for use by the SCFW itself and this can be overridden
* with the implementation of board_config_sc(). Additionally, pads may
* be assigned to various other partitions via the implementation of
* board_system_config().
@@ -156,7 +156,7 @@ typedef uint8_t sc_pad_config_t;
* This type is used to declare a pad low-power isolation config.
* ISO_LATE is the most common setting. ISO_EARLY is only used when
* an output pad is directly determined by another input pad. The
- * other two are only used when SW wants to directly contol isolation.
+ * other two are only used when SW wants to directly control isolation.
*/
typedef uint8_t sc_pad_iso_t;
diff --git a/plat/imx/common/include/sci/svc/pm/sci_pm_api.h b/plat/imx/common/include/sci/svc/pm/sci_pm_api.h
index 76ca5c4eae..13647956ab 100644
--- a/plat/imx/common/include/sci/svc/pm/sci_pm_api.h
+++ b/plat/imx/common/include/sci/svc/pm/sci_pm_api.h
@@ -294,7 +294,7 @@ sc_err_t sc_pm_get_sys_power_mode(sc_ipc_t ipc, sc_rm_pt_t pt,
* Note some resources are still not accessible even when powered up if bus
* transactions go through a fabric not powered up. Examples of this are
* resources in display and capture subsystems which require the display
- * controller or the imaging subsytem to be powered up first.
+ * controller or the imaging subsystem to be powered up first.
*
* Not that resources are grouped into power domains by the underlying
* hardware. If any resource in the domain is on, the entire power domain
diff --git a/plat/imx/common/lpuart_console.S b/plat/imx/common/lpuart_console.S
index ff01e3551c..7acf77384f 100644
--- a/plat/imx/common/lpuart_console.S
+++ b/plat/imx/common/lpuart_console.S
@@ -27,7 +27,7 @@ func console_lpuart_register
mov x0, x6
mov x30, x7
- finish_console_register lpuart putc=1, getc=1, flush=1
+ finish_console_register lpuart putc=1, getc=ENABLE_CONSOLE_GETC, flush=1
register_fail:
ret x7
diff --git a/plat/imx/common/sci/svc/pm/pm_rpc_clnt.c b/plat/imx/common/sci/svc/pm/pm_rpc_clnt.c
index 66a57a13d5..228cff8b52 100644
--- a/plat/imx/common/sci/svc/pm/pm_rpc_clnt.c
+++ b/plat/imx/common/sci/svc/pm/pm_rpc_clnt.c
@@ -412,8 +412,6 @@ void sc_pm_reboot(sc_ipc_t ipc, sc_pm_reset_type_t type)
RPC_SIZE(&msg) = 2U;
sc_call_rpc(ipc, &msg, SC_TRUE);
-
- return;
}
sc_err_t sc_pm_reboot_partition(sc_ipc_t ipc, sc_rm_pt_t pt,
diff --git a/plat/imx/common/sci/svc/rm/rm_rpc_clnt.c b/plat/imx/common/sci/svc/rm/rm_rpc_clnt.c
index 16771a5836..2cea01f11b 100644
--- a/plat/imx/common/sci/svc/rm/rm_rpc_clnt.c
+++ b/plat/imx/common/sci/svc/rm/rm_rpc_clnt.c
@@ -632,8 +632,6 @@ void sc_rm_dump(sc_ipc_t ipc)
RPC_SIZE(&msg) = 1U;
sc_call_rpc(ipc, &msg, SC_FALSE);
-
- return;
}
/**@}*/
diff --git a/plat/imx/imx7/common/imx7.mk b/plat/imx/imx7/common/imx7.mk
index 3a95772b13..156c55dd88 100644
--- a/plat/imx/imx7/common/imx7.mk
+++ b/plat/imx/imx7/common/imx7.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -16,6 +16,7 @@ PLAT_INCLUDES := -Idrivers/imx/uart \
-Iplat/imx/imx7/include \
-Idrivers/imx/timer \
-Idrivers/imx/usdhc \
+ -Iinclude/common/tbbr
# Translation tables library
include lib/xlat_tables_v2/xlat_tables.mk
@@ -46,7 +47,7 @@ BL2_SOURCES += common/desc_image_load.c \
plat/imx/imx7/common/imx7_bl2_el3_common.c \
plat/imx/imx7/common/imx7_helpers.S \
plat/imx/imx7/common/imx7_image_load.c \
- plat/imx/imx7/common/imx7_io_storage.c \
+ plat/imx/common/imx_io_storage.c \
plat/imx/common/aarch32/imx_uart_console.S \
${XLAT_TABLES_LIB_SRCS}
@@ -79,13 +80,13 @@ certificates: $(ROT_KEY)
$(ROT_KEY): | $(BUILD_PLAT)
@echo " OPENSSL $@"
@if [ ! -f $(ROT_KEY) ]; then \
- openssl genrsa 2048 > $@ 2>/dev/null; \
+ ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \
fi
$(ROTPK_HASH): $(ROT_KEY)
@echo " OPENSSL $@"
- $(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
- openssl dgst -sha256 -binary > $@ 2>/dev/null
+ $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+ ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null
endif
# Add the build options to pack BLx images and kernel device tree
@@ -109,3 +110,7 @@ endif
ifeq (${ARCH},aarch64)
$(error Error: AArch64 not supported on i.mx7)
endif
+
+ifeq (${AARCH32_SP}, none)
+ $(error Variable AARCH32_SP has to be set for AArch32)
+endif
diff --git a/plat/imx/imx7/common/imx7_bl2_el3_common.c b/plat/imx/imx7/common/imx7_bl2_el3_common.c
index 7f156e3068..4e5028c776 100644
--- a/plat/imx/imx7/common/imx7_bl2_el3_common.c
+++ b/plat/imx/imx7/common/imx7_bl2_el3_common.c
@@ -173,7 +173,7 @@ void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
console_set_scope(&console, console_scope);
/* Open handles to persistent storage */
- plat_imx7_io_setup();
+ plat_imx_io_setup();
/* Setup higher-level functionality CAAM, RTC etc */
imx_caam_init();
@@ -183,7 +183,7 @@ void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
VERBOSE("\tOPTEE 0x%08x-0x%08x\n", IMX7_OPTEE_BASE, IMX7_OPTEE_LIMIT);
VERBOSE("\tATF/BL2 0x%08x-0x%08x\n", BL2_RAM_BASE, BL2_RAM_LIMIT);
VERBOSE("\tSHRAM 0x%08x-0x%08x\n", SHARED_RAM_BASE, SHARED_RAM_LIMIT);
- VERBOSE("\tFIP 0x%08x-0x%08x\n", IMX7_FIP_BASE, IMX7_FIP_LIMIT);
+ VERBOSE("\tFIP 0x%08x-0x%08x\n", IMX_FIP_BASE, IMX_FIP_LIMIT);
VERBOSE("\tDTB-OVERLAY 0x%08x-0x%08x\n", IMX7_DTB_OVERLAY_BASE, IMX7_DTB_OVERLAY_LIMIT);
VERBOSE("\tDTB 0x%08x-0x%08x\n", IMX7_DTB_BASE, IMX7_DTB_LIMIT);
VERBOSE("\tUBOOT/BL33 0x%08x-0x%08x\n", IMX7_UBOOT_BASE, IMX7_UBOOT_LIMIT);
diff --git a/plat/imx/imx7/common/imx7_io_storage.c b/plat/imx/imx7/common/imx7_io_storage.c
deleted file mode 100644
index 977181d0c8..0000000000
--- a/plat/imx/imx7/common/imx7_io_storage.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-
-#include <platform_def.h>
-
-#include <common/debug.h>
-#include <drivers/io/io_block.h>
-#include <drivers/io/io_driver.h>
-#include <drivers/io/io_fip.h>
-#include <drivers/io/io_memmap.h>
-#include <drivers/mmc.h>
-#include <tools_share/firmware_image_package.h>
-
-static const io_dev_connector_t *fip_dev_con;
-static uintptr_t fip_dev_handle;
-
-#ifndef IMX7_FIP_MMAP
-static const io_dev_connector_t *mmc_dev_con;
-static uintptr_t mmc_dev_handle;
-
-static const io_block_spec_t mmc_fip_spec = {
- .offset = IMX7_FIP_MMC_BASE,
- .length = IMX7_FIP_SIZE
-};
-
-static const io_block_dev_spec_t mmc_dev_spec = {
- /* It's used as temp buffer in block driver. */
- .buffer = {
- .offset = IMX7_FIP_BASE,
- /* do we need a new value? */
- .length = IMX7_FIP_SIZE
- },
- .ops = {
- .read = mmc_read_blocks,
- .write = mmc_write_blocks,
- },
- .block_size = MMC_BLOCK_SIZE,
-};
-
-static int open_mmc(const uintptr_t spec);
-
-#else
-static const io_dev_connector_t *memmap_dev_con;
-static uintptr_t memmap_dev_handle;
-
-static const io_block_spec_t fip_block_spec = {
- .offset = IMX7_FIP_BASE,
- .length = IMX7_FIP_SIZE
-};
-static int open_memmap(const uintptr_t spec);
-#endif
-static int open_fip(const uintptr_t spec);
-
-static const io_uuid_spec_t bl32_uuid_spec = {
- .uuid = UUID_SECURE_PAYLOAD_BL32,
-};
-
-static const io_uuid_spec_t bl32_extra1_uuid_spec = {
- .uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA1,
-};
-
-static const io_uuid_spec_t bl32_extra2_uuid_spec = {
- .uuid = UUID_SECURE_PAYLOAD_BL32_EXTRA2,
-};
-
-static const io_uuid_spec_t bl33_uuid_spec = {
- .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
-};
-
-#if TRUSTED_BOARD_BOOT
-static const io_uuid_spec_t tb_fw_cert_uuid_spec = {
- .uuid = UUID_TRUSTED_BOOT_FW_CERT,
-};
-
-static const io_uuid_spec_t trusted_key_cert_uuid_spec = {
- .uuid = UUID_TRUSTED_KEY_CERT,
-};
-
-static const io_uuid_spec_t tos_fw_key_cert_uuid_spec = {
- .uuid = UUID_TRUSTED_OS_FW_KEY_CERT,
-};
-
-static const io_uuid_spec_t tos_fw_cert_uuid_spec = {
- .uuid = UUID_TRUSTED_OS_FW_CONTENT_CERT,
-};
-
-static const io_uuid_spec_t nt_fw_key_cert_uuid_spec = {
- .uuid = UUID_NON_TRUSTED_FW_KEY_CERT,
-};
-
-static const io_uuid_spec_t nt_fw_cert_uuid_spec = {
- .uuid = UUID_NON_TRUSTED_FW_CONTENT_CERT,
-};
-#endif /* TRUSTED_BOARD_BOOT */
-
-/* TODO: this structure is replicated multiple times. rationalize it ! */
-struct plat_io_policy {
- uintptr_t *dev_handle;
- uintptr_t image_spec;
- int (*check)(const uintptr_t spec);
-};
-
-static const struct plat_io_policy policies[] = {
-#ifndef IMX7_FIP_MMAP
- [FIP_IMAGE_ID] = {
- &mmc_dev_handle,
- (uintptr_t)&mmc_fip_spec,
- open_mmc
- },
-#else
- [FIP_IMAGE_ID] = {
- &memmap_dev_handle,
- (uintptr_t)&fip_block_spec,
- open_memmap
- },
-#endif
- [BL32_IMAGE_ID] = {
- &fip_dev_handle,
- (uintptr_t)&bl32_uuid_spec,
- open_fip
- },
- [BL32_EXTRA1_IMAGE_ID] = {
- &fip_dev_handle,
- (uintptr_t)&bl32_extra1_uuid_spec,
- open_fip
- },
- [BL32_EXTRA2_IMAGE_ID] = {
- &fip_dev_handle,
- (uintptr_t)&bl32_extra2_uuid_spec,
- open_fip
- },
- [BL33_IMAGE_ID] = {
- &fip_dev_handle,
- (uintptr_t)&bl33_uuid_spec,
- open_fip
- },
-#if TRUSTED_BOARD_BOOT
- [TRUSTED_BOOT_FW_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&tb_fw_cert_uuid_spec,
- open_fip
- },
- [TRUSTED_KEY_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&trusted_key_cert_uuid_spec,
- open_fip
- },
- [TRUSTED_OS_FW_KEY_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&tos_fw_key_cert_uuid_spec,
- open_fip
- },
- [NON_TRUSTED_FW_KEY_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&nt_fw_key_cert_uuid_spec,
- open_fip
- },
- [TRUSTED_OS_FW_CONTENT_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&tos_fw_cert_uuid_spec,
- open_fip
- },
- [NON_TRUSTED_FW_CONTENT_CERT_ID] = {
- &fip_dev_handle,
- (uintptr_t)&nt_fw_cert_uuid_spec,
- open_fip
- },
-#endif /* TRUSTED_BOARD_BOOT */
-};
-
-static int open_fip(const uintptr_t spec)
-{
- int result;
- uintptr_t local_image_handle;
-
- /* See if a Firmware Image Package is available */
- result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
- if (result == 0) {
- result = io_open(fip_dev_handle, spec, &local_image_handle);
- if (result == 0) {
- VERBOSE("Using FIP\n");
- io_close(local_image_handle);
- }
- }
- return result;
-}
-
-#ifndef IMX7_FIP_MMAP
-static int open_mmc(const uintptr_t spec)
-{
- int result;
- uintptr_t local_handle;
-
- result = io_dev_init(mmc_dev_handle, (uintptr_t)NULL);
- if (result == 0) {
- result = io_open(mmc_dev_handle, spec, &local_handle);
- if (result == 0)
- io_close(local_handle);
- }
- return result;
-}
-#else
-static int open_memmap(const uintptr_t spec)
-{
- int result;
- uintptr_t local_image_handle;
-
- result = io_dev_init(memmap_dev_handle, (uintptr_t)NULL);
- if (result == 0) {
- result = io_open(memmap_dev_handle, spec, &local_image_handle);
- if (result == 0) {
- VERBOSE("Using Memmap\n");
- io_close(local_image_handle);
- }
- }
- return result;
-}
-#endif
-
-int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
- uintptr_t *image_spec)
-{
- int result;
- const struct plat_io_policy *policy;
-
- assert(image_id < ARRAY_SIZE(policies));
-
- policy = &policies[image_id];
- result = policy->check(policy->image_spec);
- assert(result == 0);
-
- *image_spec = policy->image_spec;
- *dev_handle = *policy->dev_handle;
-
- return result;
-}
-
-void plat_imx7_io_setup(void)
-{
- int result __unused;
-
-#ifndef IMX7_FIP_MMAP
- result = register_io_dev_block(&mmc_dev_con);
- assert(result == 0);
-
- result = io_dev_open(mmc_dev_con, (uintptr_t)&mmc_dev_spec,
- &mmc_dev_handle);
- assert(result == 0);
-
-#else
- result = register_io_dev_memmap(&memmap_dev_con);
- assert(result == 0);
-
- result = io_dev_open(memmap_dev_con, (uintptr_t)NULL,
- &memmap_dev_handle);
- assert(result == 0);
-
-#endif
- result = register_io_dev_fip(&fip_dev_con);
- assert(result == 0);
-
- result = io_dev_open(fip_dev_con, (uintptr_t)NULL,
- &fip_dev_handle);
- assert(result == 0);
-}
diff --git a/plat/imx/imx7/include/imx7_def.h b/plat/imx/imx7/include/imx7_def.h
index 77a8ca3a42..d92a2d118c 100644
--- a/plat/imx/imx7/include/imx7_def.h
+++ b/plat/imx/imx7/include/imx7_def.h
@@ -13,7 +13,7 @@
/*******************************************************************************
* Function and variable prototypes
******************************************************************************/
-void plat_imx7_io_setup(void);
+void plat_imx_io_setup(void);
void imx7_platform_setup(u_register_t arg1, u_register_t arg2,
u_register_t arg3, u_register_t arg4);
diff --git a/plat/imx/imx7/picopi/include/platform_def.h b/plat/imx/imx7/picopi/include/platform_def.h
index 141571c671..5f2975dedc 100644
--- a/plat/imx/imx7/picopi/include/platform_def.h
+++ b/plat/imx/imx7/picopi/include/platform_def.h
@@ -92,12 +92,12 @@
#define IMX7_UBOOT_LIMIT (IMX7_UBOOT_BASE + IMX7_UBOOT_SIZE)
/* Define FIP image absolute location 0x80000000 - 0x80100000 */
-#define IMX7_FIP_SIZE 0x00100000
-#define IMX7_FIP_BASE (DRAM_BASE)
-#define IMX7_FIP_LIMIT (IMX7_FIP_BASE + IMX7_FIP_SIZE)
+#define IMX_FIP_SIZE 0x00100000
+#define IMX_FIP_BASE (DRAM_BASE)
+#define IMX_FIP_LIMIT (IMX_FIP_BASE + IMX_FIP_SIZE)
/* Define FIP image location at 1MB offset */
-#define IMX7_FIP_MMC_BASE (1024 * 1024)
+#define IMX_FIP_MMC_BASE (1024 * 1024)
/* Define the absolute location of DTB 0x83000000 - 0x83100000 */
#define IMX7_DTB_SIZE 0x00100000
diff --git a/plat/imx/imx7/picopi/picopi_bl2_el3_setup.c b/plat/imx/imx7/picopi/picopi_bl2_el3_setup.c
index 3cf5c36057..2df96aee09 100644
--- a/plat/imx/imx7/picopi/picopi_bl2_el3_setup.c
+++ b/plat/imx/imx7/picopi/picopi_bl2_el3_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -43,6 +43,8 @@
IOMUXC_SW_PAD_CTL_PAD_SD3_SLEW_SLOW | \
IOMUXC_SW_PAD_CTL_PAD_SD3_DSE_3_X6)
+static struct mmc_device_info mmc_info;
+
static void picopi_setup_pinmux(void)
{
/* Configure UART5 TX */
@@ -93,14 +95,13 @@ static void picopi_setup_pinmux(void)
static void picopi_usdhc_setup(void)
{
imx_usdhc_params_t params;
- struct mmc_device_info info;
zeromem(&params, sizeof(imx_usdhc_params_t));
params.reg_base = PLAT_PICOPI_BOOT_MMC_BASE;
params.clk_rate = 25000000;
params.bus_width = MMC_BUS_WIDTH_8;
- info.mmc_dev_type = MMC_IS_EMMC;
- imx_usdhc_init(&params, &info);
+ mmc_info.mmc_dev_type = MMC_IS_EMMC;
+ imx_usdhc_init(&params, &mmc_info);
}
static void picopi_setup_usb_clocks(void)
diff --git a/plat/imx/imx7/picopi/platform.mk b/plat/imx/imx7/picopi/platform.mk
index 5901001eb3..c599675475 100644
--- a/plat/imx/imx7/picopi/platform.mk
+++ b/plat/imx/imx7/picopi/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -22,7 +22,7 @@ WORKAROUND_CVE_2017_5715 := 0
RESET_TO_BL31 := 0
# Non-TF Boot ROM
-BL2_AT_EL3 := 1
+RESET_TO_BL2 := 1
# Indicate single-core
COLD_BOOT_SINGLE_CPU := 1
@@ -34,7 +34,6 @@ SEPARATE_CODE_AND_RODATA := 1
USE_COHERENT_MEM := 1
# Use multi console API
-MULTI_CONSOLE_API := 1
PLAT_PICOPI_UART :=5
$(eval $(call add_define,PLAT_PICOPI_UART))
diff --git a/plat/imx/imx7/warp7/include/platform_def.h b/plat/imx/imx7/warp7/include/platform_def.h
index 4afcb54976..683e50d44a 100644
--- a/plat/imx/imx7/warp7/include/platform_def.h
+++ b/plat/imx/imx7/warp7/include/platform_def.h
@@ -94,12 +94,12 @@
#define IMX7_UBOOT_LIMIT (IMX7_UBOOT_BASE + IMX7_UBOOT_SIZE)
/* Define FIP image absolute location 0x80000000 - 0x80100000 */
-#define IMX7_FIP_SIZE 0x00100000
-#define IMX7_FIP_BASE (DRAM_BASE)
-#define IMX7_FIP_LIMIT (IMX7_FIP_BASE + IMX7_FIP_SIZE)
+#define IMX_FIP_SIZE 0x00100000
+#define IMX_FIP_BASE (DRAM_BASE)
+#define IMX_FIP_LIMIT (IMX_FIP_BASE + IMX_FIP_SIZE)
/* Define FIP image location at 1MB offset */
-#define IMX7_FIP_MMC_BASE (1024 * 1024)
+#define IMX_FIP_MMC_BASE (1024 * 1024)
/* Define the absolute location of DTB 0x83000000 - 0x83100000 */
#define IMX7_DTB_SIZE 0x00100000
diff --git a/plat/imx/imx7/warp7/platform.mk b/plat/imx/imx7/warp7/platform.mk
index ea0f001e89..bd3d8d3617 100644
--- a/plat/imx/imx7/warp7/platform.mk
+++ b/plat/imx/imx7/warp7/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -22,7 +22,7 @@ WORKAROUND_CVE_2017_5715 := 0
RESET_TO_BL31 := 0
# Non-TF Boot ROM
-BL2_AT_EL3 := 1
+RESET_TO_BL2 := 1
# Indicate single-core
COLD_BOOT_SINGLE_CPU := 1
diff --git a/plat/imx/imx7/warp7/warp7_bl2_el3_setup.c b/plat/imx/imx7/warp7/warp7_bl2_el3_setup.c
index 935a411ac8..ec13ade0e6 100644
--- a/plat/imx/imx7/warp7/warp7_bl2_el3_setup.c
+++ b/plat/imx/imx7/warp7/warp7_bl2_el3_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -69,6 +69,8 @@
IOMUXC_SW_PAD_CTL_PAD_ECSPI1_SCLK_HYS_EN | \
IOMUXC_SW_PAD_CTL_PAD_ECSPI1_SCLK_DSE_1_X4)
+static struct mmc_device_info mmc_info;
+
static void warp7_setup_pinmux(void)
{
/* Configure UART1 TX */
@@ -99,14 +101,13 @@ static void warp7_setup_pinmux(void)
static void warp7_usdhc_setup(void)
{
imx_usdhc_params_t params;
- struct mmc_device_info info;
zeromem(&params, sizeof(imx_usdhc_params_t));
params.reg_base = PLAT_WARP7_BOOT_MMC_BASE;
params.clk_rate = 25000000;
params.bus_width = MMC_BUS_WIDTH_8;
- info.mmc_dev_type = MMC_IS_EMMC;
- imx_usdhc_init(&params, &info);
+ mmc_info.mmc_dev_type = MMC_IS_EMMC;
+ imx_usdhc_init(&params, &mmc_info);
}
static void warp7_setup_usb_clocks(void)
diff --git a/plat/imx/imx8m/ddr/clock.c b/plat/imx/imx8m/ddr/clock.c
new file mode 100644
index 0000000000..21a1b68429
--- /dev/null
+++ b/plat/imx/imx8m/ddr/clock.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#define IMX_CCM_IP_BASE (IMX_CCM_BASE + 0xa000)
+#define DRAM_SEL_CFG (IMX_CCM_BASE + 0x9800)
+#define CCM_IP_CLK_ROOT_GEN_TAGET(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x00)
+#define CCM_IP_CLK_ROOT_GEN_TAGET_SET(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x04)
+#define CCM_IP_CLK_ROOT_GEN_TAGET_CLR(i) (IMX_CCM_IP_BASE + 0x80 * (i) + 0x08)
+#define PLL_FREQ_800M U(0x00ece580)
+#define PLL_FREQ_400M U(0x00ec6984)
+#define PLL_FREQ_167M U(0x00f5a406)
+
+void ddr_pll_bypass_100mts(void)
+{
+ /* change the clock source of dram_alt_clk_root to source 2 --100MHz */
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(0), (0x7 << 24) | (0x7 << 16));
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(0), (0x2 << 24));
+
+ /* change the clock source of dram_apb_clk_root to source 2 --40MHz/2 */
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16));
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x2 << 24) | (0x1 << 16));
+
+ /* configure pll bypass mode */
+ mmio_write_32(DRAM_SEL_CFG + 0x4, BIT(24));
+}
+
+void ddr_pll_bypass_400mts(void)
+{
+ /* change the clock source of dram_alt_clk_root to source 1 --400MHz */
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(0), (0x7 << 24) | (0x7 << 16));
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(0), (0x1 << 24) | (0x1 << 16));
+
+ /* change the clock source of dram_apb_clk_root to source 3 --160MHz/2 */
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16));
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x3 << 24) | (0x1 << 16));
+
+ /* configure pll bypass mode */
+ mmio_write_32(DRAM_SEL_CFG + 0x4, BIT(24));
+}
+
+void ddr_pll_unbypass(void)
+{
+ mmio_write_32(DRAM_SEL_CFG + 0x8, BIT(24));
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_CLR(1), (0x7 << 24) | (0x7 << 16));
+ /* to source 4 --800MHz/5 */
+ mmio_write_32(CCM_IP_CLK_ROOT_GEN_TAGET_SET(1), (0x4 << 24) | (0x4 << 16));
+}
+
+#if defined(PLAT_imx8mq)
+void dram_pll_init(unsigned int drate)
+{
+ /* bypass the PLL */
+ mmio_setbits_32(HW_DRAM_PLL_CFG0, 0x30);
+
+ switch (drate) {
+ case 3200:
+ mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_800M);
+ break;
+ case 1600:
+ mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_400M);
+ break;
+ case 667:
+ mmio_write_32(HW_DRAM_PLL_CFG2, PLL_FREQ_167M);
+ break;
+ default:
+ break;
+ }
+
+ /* unbypass the PLL */
+ mmio_clrbits_32(HW_DRAM_PLL_CFG0, 0x30);
+ while (!(mmio_read_32(HW_DRAM_PLL_CFG0) & BIT(31))) {
+ ;
+ }
+}
+#else
+void dram_pll_init(unsigned int drate)
+{
+ /* bypass the PLL */
+ mmio_setbits_32(DRAM_PLL_CTRL, (1 << 16));
+ mmio_clrbits_32(DRAM_PLL_CTRL, (1 << 9));
+
+ switch (drate) {
+ case 4000:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (250 << 12) | (3 << 4) | 1);
+ break;
+ case 3734:
+ case 3733:
+ case 3732:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (311 << 12) | (4 << 4) | 1);
+ break;
+ case 3600:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (300 << 12) | (8 << 4) | 0);
+ break;
+ case 3200:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (300 << 12) | (9 << 4) | 0);
+ break;
+ case 2400:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (300 << 12) | (3 << 4) | 2);
+ break;
+ case 1600:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (400 << 12) | (3 << 4) | 3);
+ break;
+ case 1066:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (266 << 12) | (3 << 4) | 3);
+ break;
+ case 667:
+ mmio_write_32(DRAM_PLL_CTRL + 0x4, (334 << 12) | (3 << 4) | 4);
+ break;
+ default:
+ break;
+ }
+
+ mmio_setbits_32(DRAM_PLL_CTRL, BIT(9));
+ /* wait for PLL locked */
+ while (!(mmio_read_32(DRAM_PLL_CTRL) & BIT(31))) {
+ ;
+ }
+
+ /* unbypass the PLL */
+ mmio_clrbits_32(DRAM_PLL_CTRL, BIT(16));
+}
+#endif
+
+/* change the dram clock frequency */
+void dram_clock_switch(unsigned int target_drate, bool bypass_mode)
+{
+ if (bypass_mode) {
+ switch (target_drate) {
+ case 400:
+ ddr_pll_bypass_400mts();
+ break;
+ case 100:
+ ddr_pll_bypass_100mts();
+ break;
+ default:
+ ddr_pll_unbypass();
+ break;
+ }
+ } else {
+ dram_pll_init(target_drate);
+ }
+}
diff --git a/plat/imx/imx8m/ddr/ddr4_dvfs.c b/plat/imx/imx8m/ddr/ddr4_dvfs.c
new file mode 100644
index 0000000000..94bfaba7e1
--- /dev/null
+++ b/plat/imx/imx8m/ddr/ddr4_dvfs.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2018-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <dram.h>
+
+void ddr4_mr_write(uint32_t mr, uint32_t data, uint32_t mr_type,
+ uint32_t rank, uint32_t dram_type)
+{
+ uint32_t val, mr_mirror, data_mirror;
+
+ /*
+ * 1. Poll MRSTAT.mr_wr_busy until it is 0 to make sure
+ * that there is no outstanding MR transAction.
+ */
+
+ /*
+ * ERR050712:
+ * When performing a software driven MR access, the following sequence
+ * must be done automatically before performing other APB register accesses.
+ * 1. Set MRCTRL0.mr_wr=1
+ * 2. Check for MRSTAT.mr_wr_busy=0. If not, go to step (2)
+ * 3. Check for MRSTAT.mr_wr_busy=0 again (for the second time). If not, go to step (2)
+ */
+ mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31));
+
+ do {
+ while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1) {
+ ;
+ }
+
+ } while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1);
+
+ /*
+ * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, MRCTRL0.mr_rank
+ * and (for MRWs) MRCTRL1.mr_data to define the MR transaction.
+ */
+ val = mmio_read_32(DDRC_DIMMCTL(0));
+ if ((val & 0x2) && (rank == 0x2)) {
+ mr_mirror = (mr & 0x4) | ((mr & 0x1) << 1) | ((mr & 0x2) >> 1); /* BA0, BA1 swap */
+ if (dram_type == DDRC_DDR4) {
+ data_mirror = (data & 0x1607) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
+ ((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
+ ((data & 0x100) >> 1) | ((data & 0x800) << 2) | ((data & 0x2000) >> 2) ;
+ } else {
+ data_mirror = (data & 0xfe07) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
+ ((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
+ ((data & 0x100) >> 1);
+ }
+ } else {
+ mr_mirror = mr;
+ data_mirror = data;
+ }
+
+ mmio_write_32(DDRC_MRCTRL0(0), mr_type | (mr_mirror << 12) | (rank << 4));
+ mmio_write_32(DDRC_MRCTRL1(0), data_mirror);
+
+ /*
+ * 3. In a separate APB transaction, write the MRCTRL0.mr_wr to 1.
+ * This bit is self-clearing, and triggers the MR transaction.
+ * The uMCTL2 then asserts the MRSTAT.mr_wr_busy while it performs
+ * the MR transaction to SDRAM, and no further accesses can be
+ * initiated until it is deasserted.
+ */
+ mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31));
+
+ while (mmio_read_32(DDRC_MRSTAT(0))) {
+ ;
+ }
+}
+
+void dram_cfg_all_mr(struct dram_info *info, uint32_t pstate)
+{
+ uint32_t num_rank = info->num_rank;
+ uint32_t dram_type = info->dram_type;
+ /*
+ * 15. Perform MRS commands as required to re-program
+ * timing registers in the SDRAM for the new frequency
+ * (in particular, CL, CWL and WR may need to be changed).
+ */
+
+ for (int i = 1; i <= num_rank; i++) {
+ for (int j = 0; j < 6; j++) {
+ ddr4_mr_write(j, info->mr_table[pstate][j], 0, i, dram_type);
+ }
+ ddr4_mr_write(6, info->mr_table[pstate][7], 0, i, dram_type);
+ }
+}
+
+void sw_pstate(uint32_t pstate, uint32_t drate)
+{
+ uint32_t val;
+
+ mmio_write_32(DDRC_SWCTL(0), 0x0);
+
+ /*
+ * Update any registers which may be required to
+ * change for the new frequency.
+ */
+ mmio_write_32(DDRC_MSTR2(0), pstate);
+ mmio_setbits_32(DDRC_MSTR(0), (0x1 << 29));
+
+ /*
+ * Toggle RFSHCTL3.refresh_update_level to allow the
+ * new refresh-related register values to propagate
+ * to the refresh logic.
+ */
+ val = mmio_read_32(DDRC_RFSHCTL3(0));
+ if (val & 0x2) {
+ mmio_write_32(DDRC_RFSHCTL3(0), val & 0xFFFFFFFD);
+ } else {
+ mmio_write_32(DDRC_RFSHCTL3(0), val | 0x2);
+ }
+
+ /*
+ * 19. If required, trigger the initialization in the PHY.
+ * If using the gen2 multiPHY, PLL initialization should
+ * be triggered at this point. See the PHY databook for
+ * details about the frequency change procedure.
+ */
+ mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));
+ mmio_write_32(DDRC_DFIMISC(0), 0x00000020 | (pstate << 8));
+
+ /* wait DFISTAT.dfi_init_complete to 0 */
+ while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) {
+ ;
+ }
+
+ /* change the clock to the target frequency */
+ dram_clock_switch(drate, false);
+
+ mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));
+
+ /* wait DFISTAT.dfi_init_complete to 1 */
+ while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) {
+ ;
+ }
+
+ /*
+ * When changing frequencies the controller may violate the JEDEC
+ * requirement that no more than 16 refreshes should be issued within
+ * 2*tREFI. These extra refreshes are not expected to cause a problem
+ * in the SDRAM. This issue can be avoided by waiting for at least 2*tREFI
+ * before exiting self-refresh in step 19.
+ */
+ udelay(14);
+
+ /* 14. Exit the self-refresh state by setting PWRCTL.selfref_sw = 0. */
+ mmio_clrbits_32(DDRC_PWRCTL(0), (1 << 5));
+
+ while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) == 0x23) {
+ ;
+ }
+}
+
+void ddr4_swffc(struct dram_info *info, unsigned int pstate)
+{
+ uint32_t drate = info->timing_info->fsp_table[pstate];
+
+ /*
+ * 1. set SWCTL.sw_done to disable quasi-dynamic register
+ * programming outside reset.
+ */
+ mmio_write_32(DDRC_SWCTL(0), 0x0);
+
+ /*
+ * 2. Write 0 to PCTRL_n.port_en. This blocks AXI port(s)
+ * from taking any transaction (blocks traffic on AXI ports).
+ */
+ mmio_write_32(DDRC_PCTRL_0(0), 0x0);
+
+ /*
+ * 3. Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0.
+ * Wait until all AXI ports are idle (the uMCTL2 core has to
+ * be idle).
+ */
+ while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) {
+ ;
+ }
+
+ /*
+ * 4. Write 0 to SBRCTL.scrub_en. Disable SBR, required only if
+ * SBR instantiated.
+ * 5. Poll SBRSTAT.scrub_busy=0.
+ * 6. Set DERATEEN.derate_enable = 0, if DERATEEN.derate_eanble = 1
+ * and the read latency (RL) value needs to change after the frequency
+ * change (LPDDR2/3/4 only).
+ * 7. Set DBG1.dis_hif=1 so that no new commands will be accepted by the uMCTL2.
+ */
+ mmio_setbits_32(DDRC_DBG1(0), (0x1 << 1));
+
+ /*
+ * 8. Poll DBGCAM.dbg_wr_q_empty and DBGCAM.dbg_rd_q_empty to ensure
+ * that write and read data buffers are empty.
+ */
+ while ((mmio_read_32(DDRC_DBGCAM(0)) & 0x06000000) != 0x06000000) {
+ ;
+ }
+
+ /*
+ * 9. For DDR4, update MR6 with the new tDLLK value via the Mode
+ * Register Write signals
+ * 10. Set DFILPCFG0.dfi_lp_en_sr = 0, if DFILPCFG0.dfi_lp_en_sr = 1,
+ * and wait until DFISTAT.dfi_lp_ack
+ * 11. If DFI PHY Master interface is active in uMCTL2, then disable it
+ * 12. Wait until STAT.operating_mode[1:0]!=11 indicating that the
+ * controller is not in self-refresh mode.
+ */
+ if ((mmio_read_32(DDRC_STAT(0)) & 0x3) == 0x3) {
+ VERBOSE("DRAM is in Self Refresh\n");
+ }
+
+ /*
+ * 13. Assert PWRCTL.selfref_sw for the DWC_ddr_umctl2 core to enter
+ * the self-refresh mode.
+ */
+ mmio_setbits_32(DDRC_PWRCTL(0), (1 << 5));
+
+ /*
+ * 14. Wait until STAT.operating_mode[1:0]==11 indicating that the
+ * controller core is in self-refresh mode.
+ */
+ while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) != 0x23) {
+ ;
+ }
+
+ sw_pstate(pstate, drate);
+ dram_cfg_all_mr(info, pstate);
+
+ /* 23. Enable HIF commands by setting DBG1.dis_hif=0. */
+ mmio_clrbits_32(DDRC_DBG1(0), (0x1 << 1));
+
+ /*
+ * 24. Reset DERATEEN.derate_enable = 1 if DERATEEN.derate_enable
+ * has been set to 0 in step 6.
+ * 25. If DFI PHY Master interface was active before step 11 then
+ * enable it back by programming DFIPHYMSTR.phymstr_en = 1'b1.
+ * 26. Write 1 to PCTRL_n.port_en. AXI port(s) are no longer blocked
+ * from taking transactions (Re-enable traffic on AXI ports)
+ */
+ mmio_write_32(DDRC_PCTRL_0(0), 0x1);
+
+ /*
+ * 27. Write 1 to SBRCTL.scrub_en. Enable SBR if desired, only
+ * required if SBR instantiated.
+ */
+
+ /*
+ * set SWCTL.sw_done to enable quasi-dynamic register programming
+ * outside reset.
+ */
+ mmio_write_32(DDRC_SWCTL(0), 0x1);
+
+ /* wait SWSTAT.sw_done_ack to 1 */
+ while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) {
+ ;
+ }
+}
diff --git a/plat/imx/imx8m/ddr/dram.c b/plat/imx/imx8m/ddr/dram.c
new file mode 100644
index 0000000000..b5f697334e
--- /dev/null
+++ b/plat/imx/imx8m/ddr/dram.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2019-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <bl31/interrupt_mgmt.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+
+#include <dram.h>
+#include <gpc.h>
+
+#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
+#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
+
+struct dram_info dram_info;
+
+/* lock used for DDR DVFS */
+spinlock_t dfs_lock;
+
+#if defined(PLAT_imx8mq)
+/* ocram used to dram timing */
+static uint8_t dram_timing_saved[13 * 1024] __aligned(8);
+#endif
+
+static volatile uint32_t wfe_done;
+static volatile bool wait_ddrc_hwffc_done = true;
+static unsigned int dev_fsp = 0x1;
+
+static uint32_t fsp_init_reg[3][4] = {
+ { DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
+ { DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
+ { DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
+};
+
+#if defined(PLAT_imx8mq)
+static inline struct dram_cfg_param *get_cfg_ptr(void *ptr,
+ void *old_base, void *new_base)
+{
+ uintptr_t offset = (uintptr_t)ptr & ~((uintptr_t)old_base);
+
+ return (struct dram_cfg_param *)(offset + new_base);
+}
+
+/* copy the dram timing info from DRAM to OCRAM */
+void imx8mq_dram_timing_copy(struct dram_timing_info *from)
+{
+ struct dram_timing_info *info = (struct dram_timing_info *)dram_timing_saved;
+
+ /* copy the whole 13KB content used for dram timing info */
+ memcpy(dram_timing_saved, from, sizeof(dram_timing_saved));
+
+ /* correct the header after copied into ocram */
+ info->ddrc_cfg = get_cfg_ptr(info->ddrc_cfg, from, dram_timing_saved);
+ info->ddrphy_cfg = get_cfg_ptr(info->ddrphy_cfg, from, dram_timing_saved);
+ info->ddrphy_trained_csr = get_cfg_ptr(info->ddrphy_trained_csr, from, dram_timing_saved);
+ info->ddrphy_pie = get_cfg_ptr(info->ddrphy_pie, from, dram_timing_saved);
+}
+#endif
+
+#if defined(PLAT_imx8mp)
+static uint32_t lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr)
+{
+ unsigned int tmp, drate_byte;
+
+ tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
+ mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), tmp | 0x1);
+ do {
+ tmp = mmio_read_32(DDRC_MRSTAT(0));
+ } while (tmp & 0x1);
+
+ mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1);
+ mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8));
+ mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4) | BIT(31) | 0x1);
+
+ /* Workaround for SNPS STAR 9001549457 */
+ do {
+ tmp = mmio_read_32(DDRC_MRSTAT(0));
+ } while (tmp & 0x1);
+
+ do {
+ tmp = mmio_read_32(DRC_PERF_MON_MRR0_DAT(0));
+ } while (!(tmp & 0x8));
+ tmp = mmio_read_32(DRC_PERF_MON_MRR1_DAT(0));
+
+ drate_byte = (mmio_read_32(DDRC_DERATEEN(0)) >> 4) & 0xff;
+ tmp = (tmp >> (drate_byte * 8)) & 0xff;
+ mmio_write_32(DRC_PERF_MON_MRR0_DAT(0), 0x4);
+
+ return tmp;
+}
+#endif
+
+static void get_mr_values(uint32_t (*mr_value)[8])
+{
+ uint32_t init_val;
+ unsigned int i, fsp_index;
+
+ for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
+ for (i = 0U; i < 4U; i++) {
+ init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
+ mr_value[fsp_index][2*i] = init_val >> 16;
+ mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF;
+ }
+
+#if defined(PLAT_imx8mp)
+ if (dram_info.dram_type == DDRC_LPDDR4) {
+ mr_value[fsp_index][5] = lpddr4_mr_read(1, MR12); /* read MR12 from DRAM */
+ mr_value[fsp_index][7] = lpddr4_mr_read(1, MR14); /* read MR14 from DRAM */
+ }
+#endif
+ }
+}
+
+static void save_rank_setting(void)
+{
+ uint32_t i, offset;
+ uint32_t pstate_num = dram_info.num_fsp;
+
+ /* only support maximum 3 setpoints */
+ pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;
+
+ for (i = 0U; i < pstate_num; i++) {
+ offset = i ? (i + 1) * 0x1000 : 0U;
+ dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
+ if (dram_info.dram_type != DDRC_LPDDR4) {
+ dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
+ }
+#if !defined(PLAT_imx8mq)
+ dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
+#endif
+ }
+#if defined(PLAT_imx8mq)
+ dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
+#endif
+}
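save_rank_setting() captures the rank-to-rank timing registers once per setpoint. The `(i + 1) * 0x1000` expression reflects the register map in which the FREQ1 shadow copies sit 0x2000 above the base registers and the FREQ2 copies at 0x3000; a small illustrative helper for that mapping (assuming the spacing holds for every shadowed register used here) would be:

	/*
	 * Illustrative only: P-state index to shadow-register offset as used
	 * above and in rank_setting_update() (P0 -> +0x0, P1 -> +0x2000,
	 * P2 -> +0x3000).
	 */
	static inline uint32_t fsp_reg_offset(uint32_t pstate)
	{
		return (pstate == 0U) ? 0U : (pstate + 1U) * 0x1000U;
	}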
+
+/* Restore the ddrc configs */
+void dram_umctl2_init(struct dram_timing_info *timing)
+{
+ struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
+ unsigned int i;
+
+ for (i = 0U; i < timing->ddrc_cfg_num; i++) {
+ mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
+ ddrc_cfg++;
+ }
+
+ /* set the default fsp to P0 */
+ mmio_write_32(DDRC_MSTR2(0), 0x0);
+}
+
+/* Restore the dram PHY config */
+void dram_phy_init(struct dram_timing_info *timing)
+{
+ struct dram_cfg_param *cfg = timing->ddrphy_cfg;
+ unsigned int i;
+
+ /* Restore the PHY init config */
+ cfg = timing->ddrphy_cfg;
+ for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
+ dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
+ cfg++;
+ }
+
+ /* Restore the DDR PHY CSRs */
+ cfg = timing->ddrphy_trained_csr;
+ for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
+ dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
+ cfg++;
+ }
+
+ /* Load the PIE image */
+ cfg = timing->ddrphy_pie;
+ for (i = 0U; i < timing->ddrphy_pie_num; i++) {
+ dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
+ cfg++;
+ }
+}
+
+/* EL3 SGI-8 IPI handler for DDR Dynamic frequency scaling */
+static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
+ void *handle, void *cookie)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+ uint32_t irq;
+
+ irq = plat_ic_acknowledge_interrupt();
+ if (irq < 1022U) {
+ plat_ic_end_of_interrupt(irq);
+ }
+
+ /* set the WFE done status */
+ spin_lock(&dfs_lock);
+ wfe_done |= (1 << cpu_id * 8);
+ dsb();
+ spin_unlock(&dfs_lock);
+
+ while (1) {
+ /* ddr frequency change done */
+ if (!wait_ddrc_hwffc_done)
+ break;
+
+ wfe();
+ }
+
+ return 0;
+}
+
+void dram_info_init(unsigned long dram_timing_base)
+{
+ uint32_t ddrc_mstr, current_fsp;
+ unsigned int idx = 0;
+ uint32_t flags = 0;
+ uint32_t rc;
+ unsigned int i;
+
+ /* Get the dram type & rank */
+ ddrc_mstr = mmio_read_32(DDRC_MSTR(0));
+
+ dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
+ dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
+ DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;
+
+ /* Get current fsp info */
+ current_fsp = mmio_read_32(DDRC_DFIMISC(0));
+ current_fsp = (current_fsp >> 8) & 0xf;
+ dram_info.boot_fsp = current_fsp;
+ dram_info.current_fsp = current_fsp;
+
+#if defined(PLAT_imx8mq)
+ imx8mq_dram_timing_copy((struct dram_timing_info *)dram_timing_base);
+ dram_timing_base = (unsigned long) dram_timing_saved;
+#endif
+ get_mr_values(dram_info.mr_table);
+
+ dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;
+
+ /* get the num of supported fsp */
+ for (i = 0U; i < 4U; ++i) {
+ if (!dram_info.timing_info->fsp_table[i]) {
+ break;
+ }
+ idx = i;
+ }
+
+	/* only a maximum of 3 setpoints is supported */
+ dram_info.num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;
+
+ /* no valid fsp table, return directly */
+ if (i == 0U) {
+ return;
+ }
+
+ /* save the DRAMTMG2/9 for rank to rank workaround */
+ save_rank_setting();
+
+	/* check if bypass mode is supported */
+ if (dram_info.timing_info->fsp_table[idx] < 666) {
+ dram_info.bypass_mode = true;
+ } else {
+ dram_info.bypass_mode = false;
+ }
+
+ /* Register the EL3 handler for DDR DVFS */
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
+ if (rc != 0) {
+ panic();
+ }
+
+ if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
+ /* flush the L1/L2 cache */
+ dcsw_op_all(DCCSW);
+ lpddr4_swffc(&dram_info, dev_fsp, 0x0);
+ dev_fsp = (~dev_fsp) & 0x1;
+ } else if (current_fsp != 0x0) {
+ /* flush the L1/L2 cache */
+ dcsw_op_all(DCCSW);
+ ddr4_swffc(&dram_info, 0x0);
+ }
+}
+
+/*
+ * For each freq return the following info:
+ *
+ * r1: data rate
+ * r2: 1 + dram_core parent
+ * r3: 1 + dram_alt parent index
+ * r4: 1 + dram_apb parent index
+ *
+ * The parent indices can be used by an OS that manages source clocks to enable
+ * them ahead of the switch.
+ *
+ * A parent value of "0" means "don't care".
+ *
+ * The current implementation of the frequency switch is hardcoded in
+ * plat/imx/imx8m/ddr/clock.c but in theory this can be enhanced to support
+ * a wider variety of rates.
+ */
+int dram_dvfs_get_freq_info(void *handle, u_register_t index)
+{
+ switch (index) {
+ case 0:
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
+ 1, 0, 5);
+ case 1:
+ if (!dram_info.bypass_mode) {
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
+ 1, 0, 0);
+ }
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
+ 2, 2, 4);
+ case 2:
+ if (!dram_info.bypass_mode) {
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
+ 1, 0, 0);
+ }
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
+ 2, 3, 3);
+ case 3:
+ SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
+ 1, 0, 0);
+ default:
+ SMC_RET1(handle, -3);
+ }
+}
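For reference, a non-secure OS would normally query the setpoint count first and then fetch the info for each index. The sketch below uses the Linux SMCCC helper and assumes IMX_SIP_DDR_DVFS is the SiP function ID routed to dram_dvfs_handler(); both the helper and that macro are outside the scope of this patch.

	#include <linux/arm-smccc.h>

	/* Sketch of an OS-side caller; IMX_SIP_DDR_DVFS is assumed here. */
	static void query_ddr_setpoints(void)
	{
		struct arm_smccc_res res;
		unsigned long count, i;

		arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_COUNT,
			      0, 0, 0, 0, 0, 0, &res);
		count = res.a0;

		for (i = 0; i < count; i++) {
			arm_smccc_smc(IMX_SIP_DDR_DVFS, IMX_SIP_DDR_DVFS_GET_FREQ_INFO,
				      i, 0, 0, 0, 0, 0, &res);
			/*
			 * res.a0: data rate, res.a1/a2/a3: 1 + parent index for
			 * dram_core/dram_alt/dram_apb, 0 meaning "don't care".
			 */
		}
	}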
+
+int dram_dvfs_handler(uint32_t smc_fid, void *handle,
+ u_register_t x1, u_register_t x2, u_register_t x3)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+ unsigned int fsp_index = x1;
+ uint32_t online_cores = x2;
+
+ if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
+ SMC_RET1(handle, dram_info.num_fsp);
+ } else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
+ return dram_dvfs_get_freq_info(handle, x2);
+ } else if (x1 < 3U) {
+ wait_ddrc_hwffc_done = true;
+ dsb();
+
+		/* trigger the SGI IPI to inform the other cores */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
+ plat_ic_raise_el3_sgi(0x8, i);
+ }
+ }
+#if defined(PLAT_imx8mq)
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (i != cpu_id && online_cores & (1 << (i * 8))) {
+ imx_gpc_core_wake(1 << i);
+ }
+ }
+#endif
+		/* make sure all the other cores are in WFE */
+ online_cores &= ~(0x1 << (cpu_id * 8));
+ while (1) {
+ if (online_cores == wfe_done) {
+ break;
+ }
+ }
+
+ /* flush the L1/L2 cache */
+ dcsw_op_all(DCCSW);
+
+ if (dram_info.dram_type == DDRC_LPDDR4) {
+ lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
+ dev_fsp = (~dev_fsp) & 0x1;
+ } else {
+ ddr4_swffc(&dram_info, fsp_index);
+ }
+
+ dram_info.current_fsp = fsp_index;
+ wait_ddrc_hwffc_done = false;
+ wfe_done = 0;
+ dsb();
+ sev();
+ isb();
+ }
+
+ SMC_RET1(handle, 0);
+}
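Both online_cores (passed in x2) and the wfe_done bookkeeping encode one core per byte, i.e. core i is represented by bit (i * 8). An illustrative helper for building such a mask (the real value is supplied by the non-secure caller) could look like:

	/* Illustrative only: core i online -> bit (i * 8) set, matching wfe_done. */
	static uint32_t build_online_mask(const bool *online, unsigned int ncores)
	{
		uint32_t mask = 0U;
		unsigned int i;

		for (i = 0U; i < ncores; i++) {
			if (online[i]) {
				mask |= (uint32_t)1U << (i * 8U);
			}
		}

		return mask;
	}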
diff --git a/plat/imx/imx8m/ddr/dram_retention.c b/plat/imx/imx8m/ddr/dram_retention.c
new file mode 100644
index 0000000000..d98a37ee9d
--- /dev/null
+++ b/plat/imx/imx8m/ddr/dram_retention.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2018-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <lib/mmio.h>
+
+#include <dram.h>
+#include <gpc_reg.h>
+#include <platform_def.h>
+
+#define SRC_DDR1_RCR (IMX_SRC_BASE + 0x1000)
+#define SRC_DDR2_RCR (IMX_SRC_BASE + 0x1004)
+
+#define CCM_SRC_CTRL_OFFSET (IMX_CCM_BASE + 0x800)
+#define CCM_CCGR_OFFSET (IMX_CCM_BASE + 0x4000)
+#define CCM_TARGET_ROOT_OFFSET (IMX_CCM_BASE + 0x8000)
+#define CCM_SRC_CTRL(n) (CCM_SRC_CTRL_OFFSET + 0x10 * (n))
+#define CCM_CCGR(n) (CCM_CCGR_OFFSET + 0x10 * (n))
+#define CCM_TARGET_ROOT(n) (CCM_TARGET_ROOT_OFFSET + 0x80 * (n))
+
+#define DBGCAM_EMPTY 0x36000000
+
+static void rank_setting_update(void)
+{
+ uint32_t i, offset;
+ uint32_t pstate_num = dram_info.num_fsp;
+
+	/* only a maximum of 3 setpoints is supported */
+ pstate_num = (pstate_num > MAX_FSP_NUM) ? MAX_FSP_NUM : pstate_num;
+
+ for (i = 0U; i < pstate_num; i++) {
+ offset = i ? (i + 1) * 0x1000 : 0U;
+ mmio_write_32(DDRC_DRAMTMG2(0) + offset, dram_info.rank_setting[i][0]);
+ if (dram_info.dram_type != DDRC_LPDDR4) {
+ mmio_write_32(DDRC_DRAMTMG9(0) + offset, dram_info.rank_setting[i][1]);
+ }
+
+#if !defined(PLAT_imx8mq)
+ mmio_write_32(DDRC_RANKCTL(0) + offset,
+ dram_info.rank_setting[i][2]);
+#endif
+ }
+#if defined(PLAT_imx8mq)
+ mmio_write_32(DDRC_RANKCTL(0), dram_info.rank_setting[0][2]);
+#endif
+}
+
+void dram_enter_retention(void)
+{
+	/* Wait for DBGCAM to be empty */
+ while (mmio_read_32(DDRC_DBGCAM(0)) != DBGCAM_EMPTY) {
+ ;
+ }
+
+	/* Block AXI ports from taking any more transactions */
+ mmio_write_32(DDRC_PCTRL_0(0), 0x0);
+ /* Wait until all AXI ports are idle */
+ while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) {
+ ;
+ }
+
+ /* Enter self refresh */
+ mmio_write_32(DDRC_PWRCTL(0), 0xaa);
+
+ /* LPDDR4 & DDR4/DDR3L need to check different status */
+ if (dram_info.dram_type == DDRC_LPDDR4) {
+ while (0x223 != (mmio_read_32(DDRC_STAT(0)) & 0x33f)) {
+ ;
+ }
+ } else {
+ while (0x23 != (mmio_read_32(DDRC_STAT(0)) & 0x3f)) {
+ ;
+ }
+ }
+
+ mmio_write_32(DDRC_DFIMISC(0), 0x0);
+ mmio_write_32(DDRC_SWCTL(0), 0x0);
+ mmio_write_32(DDRC_DFIMISC(0), 0x1f00);
+ mmio_write_32(DDRC_DFIMISC(0), 0x1f20);
+
+ while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) {
+ ;
+ }
+
+ mmio_write_32(DDRC_DFIMISC(0), 0x1f00);
+ /* wait DFISTAT.dfi_init_complete to 1 */
+ while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) {
+ ;
+ }
+
+ mmio_write_32(DDRC_SWCTL(0), 0x1);
+
+ /* should check PhyInLP3 pub reg */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+ if (!(dwc_ddrphy_apb_rd(0x90028) & 0x1)) {
+ INFO("PhyInLP3 = 1\n");
+ }
+ dwc_ddrphy_apb_wr(0xd0000, 0x1);
+
+ /* pwrdnreqn_async adbm/adbs of ddr */
+ mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, DDRMIX_ADB400_SYNC);
+ while (mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & DDRMIX_ADB400_ACK)
+ ;
+ mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, DDRMIX_ADB400_SYNC);
+
+ /* remove PowerOk */
+ mmio_write_32(SRC_DDR1_RCR, 0x8F000008);
+
+ mmio_write_32(CCM_CCGR(5), 0);
+ mmio_write_32(CCM_SRC_CTRL(15), 2);
+
+ /* enable the phy iso */
+ mmio_setbits_32(IMX_GPC_BASE + DDRMIX_PGC, 1);
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, DDRMIX_PWR_REQ);
+
+ VERBOSE("dram enter retention\n");
+}
+
+void dram_exit_retention(void)
+{
+ VERBOSE("dram exit retention\n");
+ /* assert all reset */
+#if defined(PLAT_imx8mq)
+ mmio_write_32(SRC_DDR2_RCR, 0x8F000003);
+ mmio_write_32(SRC_DDR1_RCR, 0x8F00000F);
+ mmio_write_32(SRC_DDR2_RCR, 0x8F000000);
+#else
+ mmio_write_32(SRC_DDR1_RCR, 0x8F00001F);
+ mmio_write_32(SRC_DDR1_RCR, 0x8F00000F);
+#endif
+ mmio_write_32(CCM_CCGR(5), 2);
+ mmio_write_32(CCM_SRC_CTRL(15), 2);
+
+ /* change the clock source of dram_apb_clk_root */
+ mmio_write_32(CCM_TARGET_ROOT(65) + 0x8, (0x7 << 24) | (0x7 << 16));
+ mmio_write_32(CCM_TARGET_ROOT(65) + 0x4, (0x4 << 24) | (0x3 << 16));
+
+ /* disable iso */
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, DDRMIX_PWR_REQ);
+ mmio_write_32(SRC_DDR1_RCR, 0x8F000006);
+
+ /* wait dram pll locked */
+ while (!(mmio_read_32(DRAM_PLL_CTRL) & BIT(31))) {
+ ;
+ }
+
+ /* ddrc re-init */
+ dram_umctl2_init(dram_info.timing_info);
+
+ /*
+ * Skips the DRAM init routine and starts up in selfrefresh mode
+ * Program INIT0.skip_dram_init = 2'b11
+ */
+ mmio_setbits_32(DDRC_INIT0(0), 0xc0000000);
+ /* Keeps the controller in self-refresh mode */
+ mmio_write_32(DDRC_PWRCTL(0), 0xaa);
+ mmio_write_32(DDRC_DBG1(0), 0x0);
+ mmio_write_32(SRC_DDR1_RCR, 0x8F000004);
+ mmio_write_32(SRC_DDR1_RCR, 0x8F000000);
+
+ /* before write Dynamic reg, sw_done should be 0 */
+ mmio_write_32(DDRC_SWCTL(0), 0x0);
+
+#if !PLAT_imx8mn
+ if (dram_info.dram_type == DDRC_LPDDR4) {
+ mmio_write_32(DDRC_DDR_SS_GPR0, 0x01); /*LPDDR4 mode */
+ }
+#endif /* !PLAT_imx8mn */
+
+ mmio_write_32(DDRC_DFIMISC(0), 0x0);
+
+ /* dram phy re-init */
+ dram_phy_init(dram_info.timing_info);
+
+ /* workaround for rank-to-rank issue */
+ rank_setting_update();
+
+ /* DWC_DDRPHYA_APBONLY0_MicroContMuxSel */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+ while (dwc_ddrphy_apb_rd(0x20097)) {
+ ;
+ }
+ dwc_ddrphy_apb_wr(0xd0000, 0x1);
+
+ /* before write Dynamic reg, sw_done should be 0 */
+ mmio_write_32(DDRC_SWCTL(0), 0x0);
+ mmio_write_32(DDRC_DFIMISC(0), 0x20);
+ /* wait DFISTAT.dfi_init_complete to 1 */
+ while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) {
+ ;
+ }
+
+ /* clear DFIMISC.dfi_init_start */
+ mmio_write_32(DDRC_DFIMISC(0), 0x0);
+ /* set DFIMISC.dfi_init_complete_en */
+ mmio_write_32(DDRC_DFIMISC(0), 0x1);
+
+ /* set SWCTL.sw_done to enable quasi-dynamic register programming */
+ mmio_write_32(DDRC_SWCTL(0), 0x1);
+ /* wait SWSTAT.sw_done_ack to 1 */
+ while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) {
+ ;
+ }
+
+ mmio_write_32(DDRC_PWRCTL(0), 0x88);
+ /* wait STAT to normal state */
+ while (0x1 != (mmio_read_32(DDRC_STAT(0)) & 0x7)) {
+ ;
+ }
+
+ mmio_write_32(DDRC_PCTRL_0(0), 0x1);
+	/* dis_auto_refresh is set to 0 */
+ mmio_write_32(DDRC_RFSHCTL3(0), 0x0);
+
+ /* should check PhyInLP3 pub reg */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+ if (!(dwc_ddrphy_apb_rd(0x90028) & 0x1)) {
+ VERBOSE("PHYInLP3 = 0\n");
+ }
+ dwc_ddrphy_apb_wr(0xd0000, 0x1);
+}
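The two writes to CCM_TARGET_ROOT(65) above use the CCM's SET/CLR shadow offsets: +0x8 clears the MUX and PRE_PODF fields, +0x4 then programs the new source and divider. A minimal sketch of that access pattern, assuming the +0x4/+0x8 SET/CLR convention holds for all i.MX8M clock roots:

	#define CCM_TARGET_ROOT_SET(n)	(CCM_TARGET_ROOT(n) + 0x4)
	#define CCM_TARGET_ROOT_CLR(n)	(CCM_TARGET_ROOT(n) + 0x8)

	/* Sketch: clear MUX[26:24]/PRE_PODF[18:16], then set the new values. */
	static void ccm_reparent_root(unsigned int root, uint32_t mux,
				      uint32_t pre_podf)
	{
		mmio_write_32(CCM_TARGET_ROOT_CLR(root), (0x7U << 24) | (0x7U << 16));
		mmio_write_32(CCM_TARGET_ROOT_SET(root), (mux << 24) | (pre_podf << 16));
	}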
diff --git a/plat/imx/imx8m/ddr/lpddr4_dvfs.c b/plat/imx/imx8m/ddr/lpddr4_dvfs.c
new file mode 100644
index 0000000000..2f5f7b5c92
--- /dev/null
+++ b/plat/imx/imx8m/ddr/lpddr4_dvfs.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2018-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include <dram.h>
+
+static void lpddr4_mr_write(uint32_t mr_rank, uint32_t mr_addr, uint32_t mr_data)
+{
+ /*
+ * 1. Poll MRSTAT.mr_wr_busy until it is 0. This checks that there
+ * is no outstanding MR transaction. No
+ * writes should be performed to MRCTRL0 and MRCTRL1 if MRSTAT.mr_wr_busy = 1.
+ */
+ while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1)
+ ;
+
+ /*
+ * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr,
+ * MRCTRL0.mr_rank and (for MRWs)
+ * MRCTRL1.mr_data to define the MR transaction.
+ */
+ mmio_write_32(DDRC_MRCTRL0(0), (mr_rank << 4));
+ mmio_write_32(DDRC_MRCTRL1(0), (mr_addr << 8) | mr_data);
+ mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31));
+}
+
+void lpddr4_swffc(struct dram_info *info, unsigned int init_fsp,
+ unsigned int fsp_index)
+
+{
+ uint32_t mr, emr, emr2, emr3;
+ uint32_t mr11, mr12, mr22, mr14;
+ uint32_t val;
+ uint32_t derate_backup[3];
+ uint32_t (*mr_data)[8];
+ uint32_t phy_master;
+
+	/* 1. program targeted UMCTL2_REGS_FREQ1/2/3, already done, skip it. */
+
+ /* 2. MR13.FSP-WR=1, MRW to update MR registers */
+ mr_data = info->mr_table;
+ mr = mr_data[fsp_index][0];
+ emr = mr_data[fsp_index][1];
+ emr2 = mr_data[fsp_index][2];
+ emr3 = mr_data[fsp_index][3];
+ mr11 = mr_data[fsp_index][4];
+ mr12 = mr_data[fsp_index][5];
+ mr22 = mr_data[fsp_index][6];
+ mr14 = mr_data[fsp_index][7];
+
+ val = (init_fsp == 1) ? 0x2 << 6 : 0x1 << 6;
+ emr3 = (emr3 & 0x003f) | val | 0x0d00;
+
+ /* 12. set PWRCTL.selfref_en=0 */
+ mmio_clrbits_32(DDRC_PWRCTL(0), 0xf);
+
+ phy_master = mmio_read_32(DDRC_DFIPHYMSTR(0));
+
+	/* It is safer to configure it here */
+ mmio_clrbits_32(DDRC_DFIPHYMSTR(0), 0x1);
+
+ lpddr4_mr_write(3, 13, emr3);
+ lpddr4_mr_write(3, 1, mr);
+ lpddr4_mr_write(3, 2, emr);
+ lpddr4_mr_write(3, 3, emr2);
+ lpddr4_mr_write(3, 11, mr11);
+ lpddr4_mr_write(3, 12, mr12);
+ lpddr4_mr_write(3, 14, mr14);
+ lpddr4_mr_write(3, 22, mr22);
+
+ do {
+ val = mmio_read_32(DDRC_MRSTAT(0));
+ } while (val & 0x1);
+
+ /* 3. disable AXI ports */
+ mmio_write_32(DDRC_PCTRL_0(0), 0x0);
+
+ /* 4.Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0. */
+ do {
+ val = mmio_read_32(DDRC_PSTAT(0));
+ } while (val != 0);
+
+	/* 6. disable SBRCTL.scrub_en, skip if it was never enabled */
+	/* 7. poll SBRSTAT.scrub_busy Q2: should skip phy master if it was never enabled */
+ /* Disable phy master */
+#ifdef DFILP_SPT
+ /* 8. disable DFI LP */
+ /* DFILPCFG0.dfi_lp_en_sr */
+ val = mmio_read_32(DDRC_DFILPCFG0(0));
+ if (val & 0x100) {
+ mmio_write_32(DDRC_DFILPCFG0(0), 0x0);
+ do {
+ val = mmio_read_32(DDRC_DFISTAT(0)); // dfi_lp_ack
+ val2 = mmio_read_32(DDRC_STAT(0)); // operating_mode
+ } while (((val & 0x2) == 0x2) && ((val2 & 0x7) == 3));
+ }
+#endif
+ /* 9. wait until in normal or power down states */
+ do {
+ /* operating_mode */
+ val = mmio_read_32(DDRC_STAT(0));
+ } while (((val & 0x7) != 1) && ((val & 0x7) != 2));
+
+ /* 10. Disable automatic derating: derate_enable */
+ val = mmio_read_32(DDRC_DERATEEN(0));
+ derate_backup[0] = val;
+ mmio_clrbits_32(DDRC_DERATEEN(0), 0x1);
+
+ val = mmio_read_32(DDRC_FREQ1_DERATEEN(0));
+ derate_backup[1] = val;
+ mmio_clrbits_32(DDRC_FREQ1_DERATEEN(0), 0x1);
+
+ val = mmio_read_32(DDRC_FREQ2_DERATEEN(0));
+ derate_backup[2] = val;
+ mmio_clrbits_32(DDRC_FREQ2_DERATEEN(0), 0x1);
+
+ /* 11. disable automatic ZQ calibration */
+ mmio_setbits_32(DDRC_ZQCTL0(0), BIT(31));
+ mmio_setbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(31));
+ mmio_setbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(31));
+
+ /* 12. set PWRCTL.selfref_en=0 */
+ mmio_clrbits_32(DDRC_PWRCTL(0), 0x1);
+
+ /* 13.Poll STAT.operating_mode is in "Normal" (001) or "Power-down" (010) */
+ do {
+ val = mmio_read_32(DDRC_STAT(0));
+ } while (((val & 0x7) != 1) && ((val & 0x7) != 2));
+
+ /* 14-15. trigger SW SR */
+ /* bit 5: selfref_sw, bit 6: stay_in_selfref */
+ mmio_setbits_32(DDRC_PWRCTL(0), 0x60);
+
+ /* 16. Poll STAT.selfref_state in "Self Refresh 1" */
+ do {
+ val = mmio_read_32(DDRC_STAT(0));
+ } while ((val & 0x300) != 0x100);
+
+ /* 17. disable dq */
+ mmio_setbits_32(DDRC_DBG1(0), 0x1);
+
+ /* 18. Poll DBGCAM.wr_data_pipeline_empty and DBGCAM.rd_data_pipeline_empty */
+ do {
+ val = mmio_read_32(DDRC_DBGCAM(0));
+ val &= 0x30000000;
+ } while (val != 0x30000000);
+
+ /* 19. change MR13.FSP-OP to new FSP and MR13.VRCG to high current */
+ emr3 = (((~init_fsp) & 0x1) << 7) | (0x1 << 3) | (emr3 & 0x0077) | 0x0d00;
+ lpddr4_mr_write(3, 13, emr3);
+
+ /* 20. enter SR Power Down */
+ mmio_clrsetbits_32(DDRC_PWRCTL(0), 0x60, 0x20);
+
+ /* 21. Poll STAT.selfref_state is in "SR Power down" */
+ do {
+ val = mmio_read_32(DDRC_STAT(0));
+ } while ((val & 0x300) != 0x200);
+
+ /* 22. set dfi_init_complete_en = 0 */
+
+ /* 23. switch clock */
+	/* set SWCTL.sw_done to 0 */
+ mmio_write_32(DDRC_SWCTL(0), 0x0000);
+
+ /* 24. program frequency mode=1(bit 29), target_frequency=target_freq (bit 29) */
+ mmio_write_32(DDRC_MSTR2(0), fsp_index);
+
+	/* 25. DBICTL for FSP-OP[1], skip it if it was never enabled */
+
+ /* 26.trigger initialization in the PHY */
+
+	/* Q3: if the refresh level is updated, the refresh_update_level */
+	/* signal needs to be toggled to latch the new refresh setting */
+ val = mmio_read_32(DDRC_RFSHCTL3(0));
+ val = val ^ 0x2;
+ mmio_write_32(DDRC_RFSHCTL3(0), val);
+
+ /* Q4: only for legacy PHY, so here can skipped */
+
+ /* dfi_frequency -> 0x1x */
+ val = mmio_read_32(DDRC_DFIMISC(0));
+ val &= 0xFE;
+ val |= (fsp_index << 8);
+ mmio_write_32(DDRC_DFIMISC(0), val);
+ /* dfi_init_start */
+ val |= 0x20;
+ mmio_write_32(DDRC_DFIMISC(0), val);
+
+ /* polling dfi_init_complete de-assert */
+ do {
+ val = mmio_read_32(DDRC_DFISTAT(0));
+ } while ((val & 0x1) == 0x1);
+
+ /* change the clock frequency */
+ dram_clock_switch(info->timing_info->fsp_table[fsp_index], info->bypass_mode);
+
+ /* dfi_init_start de-assert */
+ mmio_clrbits_32(DDRC_DFIMISC(0), 0x20);
+
+ /* polling dfi_init_complete re-assert */
+ do {
+ val = mmio_read_32(DDRC_DFISTAT(0));
+ } while ((val & 0x1) == 0x0);
+
+ /* 27. set ZQCTL0.dis_srx_zqcl = 1 */
+ if (fsp_index == 0) {
+ mmio_setbits_32(DDRC_ZQCTL0(0), BIT(30));
+ } else if (fsp_index == 1) {
+ mmio_setbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(30));
+ } else {
+ mmio_setbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(30));
+ }
+
+	/* 28,29. exit "self refresh power down" to stay in "self refresh 2" */
+ /* exit SR power down */
+ mmio_clrsetbits_32(DDRC_PWRCTL(0), 0x60, 0x40);
+ /* 30. Poll STAT.selfref_state in "Self refresh 2" */
+ do {
+ val = mmio_read_32(DDRC_STAT(0));
+ } while ((val & 0x300) != 0x300);
+
+ /* 31. change MR13.VRCG to normal */
+ emr3 = (emr3 & 0x00f7) | 0x0d00;
+ lpddr4_mr_write(3, 13, emr3);
+
+ /* restore the PHY master */
+ mmio_write_32(DDRC_DFIPHYMSTR(0), phy_master);
+
+ /* 32. issue ZQ if required: zq_calib_short, bit 4 */
+ /* polling zq_calib_short_busy */
+ mmio_setbits_32(DDRC_DBGCMD(0), 0x10);
+
+ do {
+ val = mmio_read_32(DDRC_DBGSTAT(0));
+ } while ((val & 0x10) != 0x0);
+
+ /* 33. Reset ZQCTL0.dis_srx_zqcl=0 */
+ if (fsp_index == 1)
+ mmio_clrbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(30));
+ else if (fsp_index == 2)
+ mmio_clrbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(30));
+ else
+ mmio_clrbits_32(DDRC_ZQCTL0(0), BIT(30));
+
+	/* set SWCTL.sw_done to 1 and poll SWSTAT.sw_done_ack=1 */
+ mmio_write_32(DDRC_SWCTL(0), 0x1);
+
+ /* wait SWSTAT.sw_done_ack to 1 */
+ do {
+ val = mmio_read_32(DDRC_SWSTAT(0));
+ } while ((val & 0x1) == 0x0);
+
+	/* 34. set PWRCTL.stay_in_selfref=0, exit SR */
+ mmio_clrbits_32(DDRC_PWRCTL(0), 0x40);
+ /* wait tXSR */
+
+ /* 35. Poll STAT.selfref_state in "Idle" */
+ do {
+ val = mmio_read_32(DDRC_STAT(0));
+ } while ((val & 0x300) != 0x0);
+
+#ifdef DFILP_SPT
+ /* 36. restore dfi_lp.dfi_lp_en_sr */
+ mmio_setbits_32(DDRC_DFILPCFG0(0), BIT(8));
+#endif
+
+ /* 37. re-enable CAM: dis_dq */
+ mmio_clrbits_32(DDRC_DBG1(0), 0x1);
+
+ /* 38. re-enable automatic SR: selfref_en */
+ mmio_setbits_32(DDRC_PWRCTL(0), 0x1);
+
+	/* 39. re-enable automatic ZQ: dis_auto_zq=0 */
+	/* clearing dis_auto_zq re-enables automatic ZQ calibration */
+ if (fsp_index == 1)
+ mmio_clrbits_32(DDRC_FREQ1_ZQCTL0(0), BIT(31));
+ else if (fsp_index == 2)
+ mmio_clrbits_32(DDRC_FREQ2_ZQCTL0(0), BIT(31));
+ else
+ mmio_clrbits_32(DDRC_ZQCTL0(0), BIT(31));
+	/* 40. re-enable automatic derating: derate_enable */
+ mmio_write_32(DDRC_DERATEEN(0), derate_backup[0]);
+ mmio_write_32(DDRC_FREQ1_DERATEEN(0), derate_backup[1]);
+ mmio_write_32(DDRC_FREQ2_DERATEEN(0), derate_backup[2]);
+
+ /* 41. write 1 to PCTRL.port_en */
+ mmio_write_32(DDRC_PCTRL_0(0), 0x1);
+
+	/* 42. enable SBRCTL.scrub_en, skip if it was never enabled */
+}
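The emr3 manipulation in lpddr4_swffc() encodes the LPDDR4 MR13 fields directly as magic masks. For readability, the assumed bit positions (taken from the JEDEC LPDDR4 MR13 definition, not from a header in this patch) are:

	/* Assumed LPDDR4 MR13 bit positions used by the sequence above. */
	#define MR13_FSP_OP	BIT(7)	/* FSP the DRAM operates from        */
	#define MR13_FSP_WR	BIT(6)	/* FSP that mode register writes hit */
	#define MR13_VRCG	BIT(3)	/* VREF current generator: 1 = high  */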
diff --git a/plat/imx/imx8m/gpc_common.c b/plat/imx/imx8m/gpc_common.c
index 1e55f058dc..71e0af1fd8 100644
--- a/plat/imx/imx8m/gpc_common.c
+++ b/plat/imx/imx8m/gpc_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
+#include <common/runtime_svc.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
@@ -16,14 +17,22 @@
#include <imx8m_psci.h>
#include <plat_imx8.h>
+#define MAX_PLL_NUM U(10)
+
static uint32_t gpc_imr_offset[] = { IMR1_CORE0_A53, IMR1_CORE1_A53, IMR1_CORE2_A53, IMR1_CORE3_A53, };
DEFINE_BAKERY_LOCK(gpc_lock);
+#define FSL_SIP_CONFIG_GPC_PM_DOMAIN 0x03
+
#pragma weak imx_set_cpu_pwr_off
#pragma weak imx_set_cpu_pwr_on
#pragma weak imx_set_cpu_lpm
#pragma weak imx_set_cluster_powerdown
+#pragma weak imx_set_sys_wakeup
+#pragma weak imx_noc_slot_config
+#pragma weak imx_gpc_handler
+#pragma weak imx_anamix_override
void imx_set_cpu_secure_entry(unsigned int core_id, uintptr_t sec_entrypoint)
{
@@ -89,7 +98,7 @@ void imx_set_cpu_lpm(unsigned int core_id, bool pdn)
/* assert the pcg pcr bit of the core */
mmio_setbits_32(IMX_GPC_BASE + COREx_PGC_PCR(core_id), 0x1);
} else {
- /* disbale CORE WFI PDN & IRQ PUP */
+ /* disable CORE WFI PDN & IRQ PUP */
mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_AD, COREx_WFI_PDN(core_id) |
COREx_IRQ_WUP(core_id));
/* deassert the pcg pcr bit of the core */
@@ -206,7 +215,6 @@ void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
}
}
-#pragma weak imx_noc_slot_config
/*
* this function only need to be override by platform
* that support noc power down, for example: imx8mm.
@@ -228,7 +236,7 @@ void imx_set_sys_lpm(unsigned int last_core, bool retention)
if (retention)
val |= (SLPCR_EN_DSM | SLPCR_VSTBY | SLPCR_SBYOS |
- SLPCR_BYPASS_PMIC_READY | SLPCR_A53_FASTWUP_STOP_MODE);
+ SLPCR_BYPASS_PMIC_READY);
mmio_write_32(IMX_GPC_BASE + SLPCR, val);
@@ -250,3 +258,49 @@ void imx_clear_rbc_count(void)
mmio_clrbits_32(IMX_GPC_BASE + SLPCR, SLPCR_RBC_EN |
(0x3f << SLPCR_RBC_COUNT_SHIFT));
}
+
+struct pll_override pll[MAX_PLL_NUM] = {
+ {.reg = 0x0, .override_mask = (1 << 12) | (1 << 8), },
+ {.reg = 0x14, .override_mask = (1 << 12) | (1 << 8), },
+ {.reg = 0x28, .override_mask = (1 << 12) | (1 << 8), },
+ {.reg = 0x50, .override_mask = (1 << 12) | (1 << 8), },
+ {.reg = 0x64, .override_mask = (1 << 10) | (1 << 8), },
+ {.reg = 0x74, .override_mask = (1 << 10) | (1 << 8), },
+ {.reg = 0x84, .override_mask = (1 << 10) | (1 << 8), },
+ {.reg = 0x94, .override_mask = 0x5555500, },
+ {.reg = 0x104, .override_mask = 0x5555500, },
+ {.reg = 0x114, .override_mask = 0x500, },
+};
+
+#define PLL_BYPASS BIT(4)
+void imx_anamix_override(bool enter)
+{
+ unsigned int i;
+
+ /*
+ * bypass all the plls & enable the override bit before
+ * entering DSM mode.
+ */
+ for (i = 0U; i < MAX_PLL_NUM; i++) {
+ if (enter) {
+ mmio_setbits_32(IMX_ANAMIX_BASE + pll[i].reg, PLL_BYPASS);
+ mmio_setbits_32(IMX_ANAMIX_BASE + pll[i].reg, pll[i].override_mask);
+ } else {
+ mmio_clrbits_32(IMX_ANAMIX_BASE + pll[i].reg, PLL_BYPASS);
+ mmio_clrbits_32(IMX_ANAMIX_BASE + pll[i].reg, pll[i].override_mask);
+ }
+ }
+}
+
+int imx_gpc_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2, u_register_t x3)
+{
+ switch (x1) {
+ case FSL_SIP_CONFIG_GPC_PM_DOMAIN:
+ imx_gpc_pm_domain_enable(x2, x3);
+ break;
+ default:
+ return SMC_UNK;
+ }
+
+ return 0;
+}
diff --git a/plat/imx/imx8m/imx8m_caam.c b/plat/imx/imx8m/imx8m_caam.c
index 478005e2ee..a4915506a1 100644
--- a/plat/imx/imx8m/imx8m_caam.c
+++ b/plat/imx/imx8m/imx8m_caam.c
@@ -1,13 +1,16 @@
/*
- * Copyright (c) 2019, NXP. All rights reserved.
+ * Copyright (c) 2019-2022 NXP. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <common/debug.h>
#include <lib/mmio.h>
#include <imx8m_caam.h>
+#define HAB_JR0_DID U(0x8011)
+
void imx8m_caam_init(void)
{
uint32_t sm_cmd;
@@ -20,7 +23,12 @@ void imx8m_caam_init(void)
mmio_write_32(SM_CMD, sm_cmd);
/* config CAAM JRaMID set MID to Cortex A */
- mmio_write_32(CAAM_JR0MID, CAAM_NS_MID);
+ if (mmio_read_32(CAAM_JR0MID) == HAB_JR0_DID) {
+ NOTICE("Do not release JR0 to NS as it can be used by HAB\n");
+ } else {
+ mmio_write_32(CAAM_JR0MID, CAAM_NS_MID);
+ }
+
mmio_write_32(CAAM_JR1MID, CAAM_NS_MID);
mmio_write_32(CAAM_JR2MID, CAAM_NS_MID);
diff --git a/plat/imx/imx8m/imx8m_ccm.c b/plat/imx/imx8m/imx8m_ccm.c
new file mode 100644
index 0000000000..10a00c9900
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_ccm.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023, Pengutronix. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#define UCR1 0x80
+#define UCR1_UARTEN BIT(0)
+#define DOMAIN0_RUNNING(d) (((d) & 0x3) != 0)
+
+static struct imx_uart {
+ unsigned int ccm_reg;
+ unsigned int uart_base;
+} imx8m_uart_info[] = {
+ { /* UART 1 */
+ .ccm_reg = 0x4490,
+ .uart_base = 0x30860000,
+ }, { /* UART 2 */
+ .ccm_reg = 0x44a0,
+ .uart_base = 0x30890000,
+ }, { /* UART 3 */
+ .ccm_reg = 0x44b0,
+ .uart_base = 0x30880000,
+ }, { /* UART 4 */
+ .ccm_reg = 0x44c0,
+ .uart_base = 0x30a60000,
+ }
+};
+
+unsigned int imx8m_uart_get_base(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8m_uart_info); i++) {
+ uint32_t val;
+
+ /*
+ * At least check that the clock-gate is ungated before we
+ * access the UART register.
+ */
+ val = mmio_read_32(IMX_CCM_BASE + imx8m_uart_info[i].ccm_reg);
+ if (DOMAIN0_RUNNING(val)) {
+ val = mmio_read_32(imx8m_uart_info[i].uart_base + UCR1);
+ if (val & UCR1_UARTEN) {
+ return imx8m_uart_info[i].uart_base;
+ }
+ }
+ }
+
+ /*
+ * We should return an error and inform the user but we can't do it
+ * this early.
+ */
+ return 0;
+}
diff --git a/plat/imx/imx8m/imx8m_csu.c b/plat/imx/imx8m/imx8m_csu.c
new file mode 100644
index 0000000000..2b3a7d97bb
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_csu.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include <imx8m_csu.h>
+
+void imx_csu_init(const struct imx_csu_cfg *csu_cfg)
+{
+ const struct imx_csu_cfg *csu = csu_cfg;
+ uint32_t val;
+
+ while (csu->type != CSU_INVALID) {
+ switch (csu->type) {
+ case CSU_CSL:
+ val = mmio_read_32(CSLx_REG(csu->idx));
+ if (val & CSLx_LOCK(csu->idx)) {
+ break;
+ }
+ mmio_clrsetbits_32(CSLx_REG(csu->idx), CSLx_CFG(0xff, csu->idx),
+ CSLx_CFG(csu->csl_level | (csu->lock << 8), csu->idx));
+ break;
+ case CSU_HP:
+ val = mmio_read_32(CSU_HP_REG(csu->idx));
+ if (val & CSU_HP_LOCK(csu->idx)) {
+ break;
+ }
+ mmio_clrsetbits_32(CSU_HP_REG(csu->idx), CSU_HP_CFG(0x1, csu->idx),
+ CSU_HP_CFG(csu->hp | (csu->lock << 0x1), csu->idx));
+ break;
+ case CSU_SA:
+ val = mmio_read_32(CSU_SA_REG(csu->idx));
+ if (val & CSU_SA_LOCK(csu->idx)) {
+ break;
+ }
+ mmio_clrsetbits_32(CSU_SA_REG(csu->idx), CSU_SA_CFG(0x1, csu->idx),
+ CSU_SA_CFG(csu->sa | (csu->lock << 0x1), csu->idx));
+ break;
+ case CSU_HPCONTROL:
+ val = mmio_read_32(CSU_HPCONTROL_REG(csu->idx));
+ if (val & CSU_HPCONTROL_LOCK(csu->idx)) {
+ break;
+ }
+ mmio_clrsetbits_32(CSU_HPCONTROL_REG(csu->idx), CSU_HPCONTROL_CFG(0x1, csu->idx),
+ CSU_HPCONTROL_CFG(csu->hpctrl | (csu->lock << 0x1), csu->idx));
+ break;
+ default:
+ break;
+ }
+
+ csu++;
+ }
+}
diff --git a/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c b/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c
new file mode 100644
index 0000000000..5d65ef2027
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_dyn_cfg_helpers.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022, Linaro.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#if MEASURED_BOOT
+#include <common/desc_image_load.h>
+#endif
+#include <common/fdt_wrappers.h>
+#include <libfdt.h>
+#include <platform_def.h>
+
+#define DTB_PROP_HW_LOG_ADDR "tpm_event_log_addr"
+#define DTB_PROP_HW_LOG_SIZE "tpm_event_log_size"
+
+#if MEASURED_BOOT
+
+static int imx8m_event_log_fdt_init_overlay(uintptr_t dt_base, int dt_size)
+{
+ int ret;
+ int offset;
+ void *dtb = (void *)dt_base;
+
+ ret = fdt_create_empty_tree(dtb, dt_size);
+ if (ret < 0) {
+ ERROR("cannot create empty dtb tree: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ offset = fdt_path_offset(dtb, "/");
+ if (offset < 0) {
+ ERROR("cannot find root of the tree: %s\n",
+ fdt_strerror(offset));
+ return offset;
+ }
+
+ offset = fdt_add_subnode(dtb, offset, "fragment@0");
+ if (offset < 0) {
+ ERROR("cannot add fragment node: %s\n",
+ fdt_strerror(offset));
+ return offset;
+ }
+
+ ret = fdt_setprop_string(dtb, offset, "target-path", "/");
+ if (ret < 0) {
+ ERROR("cannot set target-path property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ offset = fdt_add_subnode(dtb, offset, "__overlay__");
+ if (offset < 0) {
+ ERROR("cannot add __overlay__ node: %s\n",
+ fdt_strerror(offset));
+		return offset;
+ }
+
+ offset = fdt_add_subnode(dtb, offset, "tpm_event_log");
+ if (offset < 0) {
+ ERROR("cannot add tpm_event_log node: %s\n",
+ fdt_strerror(offset));
+ return offset;
+ }
+
+ ret = fdt_setprop_string(dtb, offset, "compatible",
+ "arm,tpm_event_log");
+ if (ret < 0) {
+ ERROR("cannot set compatible property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ ret = fdt_setprop_u64(dtb, offset, "tpm_event_log_addr", 0);
+ if (ret < 0) {
+ ERROR("cannot set tpm_event_log_addr property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ ret = fdt_setprop_u32(dtb, offset, "tpm_event_log_size", 0);
+ if (ret < 0) {
+ ERROR("cannot set tpm_event_log_size property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Write the Event Log address and its size in the DTB.
+ *
+ * This function is supposed to be called only by BL2.
+ *
+ * Returns:
+ * 0 = success
+ * < 0 = error
+ */
+static int imx8m_set_event_log_info(uintptr_t config_base,
+ uintptr_t log_addr, size_t log_size)
+{
+ /* As libfdt uses void *, we can't avoid this cast */
+ void *dtb = (void *)config_base;
+ const char *compatible_tpm = "arm,tpm_event_log";
+ uint64_t base = cpu_to_fdt64(log_addr);
+ uint32_t sz = cpu_to_fdt32(log_size);
+ int err, node;
+
+ err = fdt_open_into(dtb, dtb, PLAT_IMX8M_DTO_MAX_SIZE);
+ if (err < 0) {
+ ERROR("Invalid Device Tree at %p: error %d\n", dtb, err);
+ return err;
+ }
+
+ /*
+ * Verify that the DTB is valid, before attempting to write to it,
+ * and get the DTB root node.
+ */
+
+ /* Check if the pointer to DT is correct */
+ err = fdt_check_header(dtb);
+ if (err < 0) {
+ WARN("Invalid DTB file passed\n");
+ return err;
+ }
+
+ /*
+ * Find the TPM node in device tree.
+ */
+ node = fdt_node_offset_by_compatible(dtb, -1, compatible_tpm);
+ if (node < 0) {
+ ERROR("The compatible property '%s' not%s", compatible_tpm,
+ " found in the config\n");
+ return node;
+ }
+
+ err = fdt_setprop(dtb, node, DTB_PROP_HW_LOG_ADDR, &base, 8);
+ if (err < 0) {
+ ERROR("Failed to add log addr err %d\n", err);
+ return err;
+ }
+
+ err = fdt_setprop(dtb, node, DTB_PROP_HW_LOG_SIZE, &sz, 4);
+ if (err < 0) {
+ ERROR("Failed to add log addr err %d\n", err);
+ return err;
+ }
+
+ err = fdt_pack(dtb);
+ if (err < 0) {
+ ERROR("Failed to pack Device Tree at %p: error %d\n", dtb, err);
+ return err;
+ }
+
+ /*
+ * Ensure that the info written to the DTB is visible
+ * to other images.
+ */
+ flush_dcache_range(config_base, fdt_totalsize(dtb));
+
+ return err;
+}
+
+/*
+ * This function writes the Event Log address and its size
+ * in the i.MX8M DT overlay.
+ *
+ * This function is supposed to be called only by BL2.
+ *
+ * Returns:
+ * 0 = success
+ * < 0 = error
+ */
+int imx8m_set_nt_fw_info(size_t log_size, uintptr_t *ns_log_addr)
+{
+ uintptr_t ns_addr;
+ int err;
+
+ assert(ns_log_addr != NULL);
+
+ ns_addr = PLAT_IMX8M_DTO_BASE + PLAT_IMX8M_DTO_MAX_SIZE;
+
+ imx8m_event_log_fdt_init_overlay(PLAT_IMX8M_DTO_BASE,
+ PLAT_IMX8M_DTO_MAX_SIZE);
+
+ /* Write the Event Log address and its size in the DTB */
+ err = imx8m_set_event_log_info(PLAT_IMX8M_DTO_BASE,
+ ns_addr, log_size);
+
+ /* Return Event Log address in Non-secure memory */
+ *ns_log_addr = (err < 0) ? 0UL : ns_addr;
+ return err;
+}
+
+#endif /* MEASURED_BOOT */
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_image_load.c b/plat/imx/imx8m/imx8m_image_load.c
index 3a030699c7..3a030699c7 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_image_load.c
+++ b/plat/imx/imx8m/imx8m_image_load.c
diff --git a/plat/imx/imx8m/imx8m_measured_boot.c b/plat/imx/imx8m/imx8m_measured_boot.c
new file mode 100644
index 0000000000..159be00a86
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_measured_boot.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
+ * Copyright (c) 2022, Linaro.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include "./include/imx8m_measured_boot.h"
+#include <drivers/measured_boot/event_log/event_log.h>
+#include <drivers/measured_boot/metadata.h>
+#include <plat/arm/common/plat_arm.h>
+
+/* Event Log data */
+static uint8_t event_log[PLAT_IMX_EVENT_LOG_MAX_SIZE];
+
+/* i.MX8M table with platform specific image IDs, names and PCRs */
+static const event_log_metadata_t imx8m_event_log_metadata[] = {
+ { BL31_IMAGE_ID, MBOOT_BL31_IMAGE_STRING, PCR_0 },
+ { BL32_IMAGE_ID, MBOOT_BL32_IMAGE_STRING, PCR_0 },
+ { BL32_EXTRA1_IMAGE_ID, MBOOT_BL32_EXTRA1_IMAGE_STRING, PCR_0 },
+ { BL32_EXTRA2_IMAGE_ID, MBOOT_BL32_EXTRA2_IMAGE_STRING, PCR_0 },
+ { BL33_IMAGE_ID, MBOOT_BL33_IMAGE_STRING, PCR_0 },
+ { EVLOG_INVALID_ID, NULL, (unsigned int)(-1) } /* Terminator */
+};
+
+int plat_mboot_measure_image(unsigned int image_id, image_info_t *image_data)
+{
+ /* Calculate image hash and record data in Event Log */
+ int err = event_log_measure_and_record(image_data->image_base,
+ image_data->image_size,
+ image_id,
+ imx8m_event_log_metadata);
+ if (err != 0) {
+ ERROR("%s%s image id %u (%i)\n",
+ "Failed to ", "record", image_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+void bl2_plat_mboot_init(void)
+{
+ event_log_init(event_log, event_log + sizeof(event_log));
+ event_log_write_header();
+}
+
+void bl2_plat_mboot_finish(void)
+{
+ int rc = 0;
+
+ /* Event Log address in Non-Secure memory */
+ uintptr_t ns_log_addr;
+
+ /* Event Log filled size */
+ size_t event_log_cur_size;
+
+ event_log_cur_size = event_log_get_cur_size(event_log);
+
+ rc = imx8m_set_nt_fw_info(event_log_cur_size, &ns_log_addr);
+ if (rc != 0) {
+ ERROR("%s(): Unable to update %s_FW_CONFIG\n",
+ __func__, "NT");
+ /*
+		 * It is a fatal error because, on i.MX, U-Boot assumes that
+ * a valid event log exists and will use it to record the
+ * measurements into the fTPM.
+ */
+ panic();
+ }
+
+ /* Copy Event Log to Non-secure memory */
+ (void)memcpy((void *)ns_log_addr, (const void *)event_log,
+ event_log_cur_size);
+
+ /* Ensure that the Event Log is visible in Non-secure memory */
+ flush_dcache_range(ns_log_addr, event_log_cur_size);
+
+ dump_event_log((uint8_t *)event_log, event_log_cur_size);
+}
+
+int plat_mboot_measure_key(const void *pk_oid, const void *pk_ptr,
+ size_t pk_len)
+{
+ return 0;
+}
diff --git a/plat/imx/imx8m/imx8m_psci_common.c b/plat/imx/imx8m/imx8m_psci_common.c
index 9dfd311d13..48eb8a6612 100644
--- a/plat/imx/imx8m/imx8m_psci_common.c
+++ b/plat/imx/imx8m/imx8m_psci_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,6 +13,7 @@
#include <lib/mmio.h>
#include <lib/psci/psci.h>
+#include <dram.h>
#include <gpc.h>
#include <imx8m_psci.h>
#include <plat_imx8.h>
@@ -23,6 +24,7 @@
* reuse below ones.
*/
#pragma weak imx_validate_power_state
+#pragma weak imx_pwr_domain_off
#pragma weak imx_domain_suspend
#pragma weak imx_domain_suspend_finish
#pragma weak imx_get_sys_suspend_power_state
@@ -39,7 +41,7 @@ int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
int imx_pwr_domain_on(u_register_t mpidr)
{
unsigned int core_id;
- uint64_t base_addr = BL31_BASE;
+ uint64_t base_addr = BL31_START;
core_id = MPIDR_AFFLVL0_VAL(mpidr);
@@ -101,7 +103,7 @@ void imx_cpu_standby(plat_local_state_t cpu_state)
void imx_domain_suspend(const psci_power_state_t *target_state)
{
- uint64_t base_addr = BL31_BASE;
+ uint64_t base_addr = BL31_START;
uint64_t mpidr = read_mpidr_el1();
unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
@@ -118,8 +120,11 @@ void imx_domain_suspend(const psci_power_state_t *target_state)
if (!is_local_state_run(CLUSTER_PWR_STATE(target_state)))
imx_set_cluster_powerdown(core_id, CLUSTER_PWR_STATE(target_state));
- if (is_local_state_off(SYSTEM_PWR_STATE(target_state)))
+ if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
imx_set_sys_lpm(core_id, true);
+ dram_enter_retention();
+ imx_anamix_override(true);
+ }
}
void imx_domain_suspend_finish(const psci_power_state_t *target_state)
@@ -127,8 +132,11 @@ void imx_domain_suspend_finish(const psci_power_state_t *target_state)
uint64_t mpidr = read_mpidr_el1();
unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
- if (is_local_state_off(SYSTEM_PWR_STATE(target_state)))
+ if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
+ imx_anamix_override(false);
+ dram_exit_retention();
imx_set_sys_lpm(core_id, false);
+ }
if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
imx_clear_rbc_count();
@@ -222,8 +230,11 @@ int imx_system_reset2(int is_vendor, int reset_type, u_register_t cookie)
void __dead2 imx_system_off(void)
{
- mmio_write_32(IMX_SNVS_BASE + SNVS_LPCR, SNVS_LPCR_SRTC_ENV |
- SNVS_LPCR_DP_EN | SNVS_LPCR_TOP);
+ uint32_t val;
+
+ val = mmio_read_32(IMX_SNVS_BASE + SNVS_LPCR);
+ val |= SNVS_LPCR_SRTC_ENV | SNVS_LPCR_DP_EN | SNVS_LPCR_TOP;
+ mmio_write_32(IMX_SNVS_BASE + SNVS_LPCR, val);
while (1)
;
diff --git a/plat/imx/imx8m/imx8m_snvs.c b/plat/imx/imx8m/imx8m_snvs.c
new file mode 100644
index 0000000000..7874a68690
--- /dev/null
+++ b/plat/imx/imx8m/imx8m_snvs.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#define SNVS_HPCOMR U(0x04)
+#define SNVS_NPSWA_EN BIT(31)
+
+void enable_snvs_privileged_access(void)
+{
+ unsigned int val;
+
+ val = mmio_read_32(IMX_SNVS_BASE + SNVS_HPCOMR);
+ mmio_write_32(IMX_SNVS_BASE + SNVS_HPCOMR, val | SNVS_NPSWA_EN);
+}
diff --git a/plat/imx/imx8m/imx8mm/gpc.c b/plat/imx/imx8m/imx8mm/gpc.c
index ab59292e5d..f173a16494 100644
--- a/plat/imx/imx8m/imx8mm/gpc.c
+++ b/plat/imx/imx8m/imx8mm/gpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,6 +19,293 @@
#include <gpc.h>
#include <imx_sip_svc.h>
+#define CCGR(x) (0x4000 + (x) * 16)
+
+enum pu_domain_id {
+ HSIOMIX,
+ PCIE,
+ OTG1,
+ OTG2,
+ GPUMIX,
+ VPUMIX,
+ VPU_G1,
+ VPU_G2,
+ VPU_H1,
+ DISPMIX,
+ MIPI,
+	/* the two domains below are only for TF-A internal use */
+ GPU2D,
+ GPU3D,
+ MAX_DOMAINS,
+};
+
+/* PU domain */
+static struct imx_pwr_domain pu_domains[] = {
+ IMX_MIX_DOMAIN(HSIOMIX, false),
+ IMX_PD_DOMAIN(PCIE, false),
+ IMX_PD_DOMAIN(OTG1, true),
+ IMX_PD_DOMAIN(OTG2, true),
+ IMX_MIX_DOMAIN(GPUMIX, false),
+ IMX_MIX_DOMAIN(VPUMIX, false),
+ IMX_PD_DOMAIN(VPU_G1, false),
+ IMX_PD_DOMAIN(VPU_G2, false),
+ IMX_PD_DOMAIN(VPU_H1, false),
+ IMX_MIX_DOMAIN(DISPMIX, false),
+ IMX_PD_DOMAIN(MIPI, false),
+	/* the two domains below are only for TF-A internal use */
+ IMX_MIX_DOMAIN(GPU2D, false),
+ IMX_MIX_DOMAIN(GPU3D, false),
+};
+
+static unsigned int pu_domain_status;
+
+#define GPU_RCR 0x40
+#define VPU_RCR 0x44
+
+#define VPU_CTL_BASE 0x38330000
+#define BLK_SFT_RSTN_CSR 0x0
+#define H1_SFT_RSTN BIT(2)
+#define G1_SFT_RSTN BIT(1)
+#define G2_SFT_RSTN BIT(0)
+
+#define DISP_CTL_BASE 0x32e28000
+
+void vpu_sft_reset_assert(uint32_t domain_id)
+{
+ uint32_t val;
+
+ val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
+
+ switch (domain_id) {
+ case VPU_G1:
+ val &= ~G1_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ case VPU_G2:
+ val &= ~G2_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ case VPU_H1:
+ val &= ~H1_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ default:
+ break;
+ }
+}
+
+void vpu_sft_reset_deassert(uint32_t domain_id)
+{
+ uint32_t val;
+
+ val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);
+
+ switch (domain_id) {
+ case VPU_G1:
+ val |= G1_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ case VPU_G2:
+ val |= G2_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ case VPU_H1:
+ val |= H1_SFT_RSTN;
+ mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
+ break;
+ default:
+ break;
+ }
+}
+
+void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
+{
+ if (domain_id >= MAX_DOMAINS) {
+ return;
+ }
+
+ struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];
+
+ if (on) {
+ pu_domain_status |= (1 << domain_id);
+
+ if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
+ domain_id == VPU_H1) {
+ vpu_sft_reset_assert(domain_id);
+ }
+
+ /* HSIOMIX has no PU bit, so skip for it */
+ if (domain_id != HSIOMIX) {
+ /* clear the PGC bit */
+ mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
+
+ /* power up the domain */
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) {
+ ;
+ }
+ }
+
+ if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
+ domain_id == VPU_H1) {
+ vpu_sft_reset_deassert(domain_id);
+			/* delay for a while to make sure the reset is done */
+ udelay(100);
+ }
+
+ if (domain_id == GPUMIX) {
+ /* assert reset */
+ mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1);
+
+ /* power up GPU2D */
+ mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
+
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ) {
+ ;
+ }
+
+ udelay(1);
+
+ /* power up GPU3D */
+ mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
+
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ) {
+ ;
+ }
+
+ udelay(10);
+ /* release the gpumix reset */
+ mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0);
+ udelay(10);
+ }
+
+ /* vpu sft clock enable */
+ if (domain_id == VPUMIX) {
+ mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1);
+ udelay(5);
+ mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0);
+ udelay(5);
+
+ /* enable all clock */
+ mmio_write_32(VPU_CTL_BASE + 0x4, 0x7);
+ }
+
+ if (domain_id == DISPMIX) {
+ /* special setting for DISPMIX */
+ mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff);
+ mmio_write_32(DISP_CTL_BASE, 0x7f);
+ mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000);
+ }
+
+ /* handle the ADB400 sync */
+ if (pwr_domain->need_sync) {
+ /* clear adb power down request */
+ mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
+
+ /* wait for adb power request ack */
+ while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
+ ;
+ }
+ }
+
+ if (domain_id == GPUMIX) {
+ /* power up GPU2D ADB */
+ mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);
+
+ /* wait for adb power request ack */
+ while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
+ ;
+ }
+
+ /* power up GPU3D ADB */
+ mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);
+
+ /* wait for adb power request ack */
+ while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
+ ;
+ }
+ }
+ } else {
+ pu_domain_status &= ~(1 << domain_id);
+
+ if (domain_id == OTG1 || domain_id == OTG2) {
+ return;
+ }
+
+ /* GPU2D & GPU3D ADB power down */
+ if (domain_id == GPUMIX) {
+ mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);
+
+ /* wait for adb power request ack */
+ while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK)) {
+ ;
+ }
+
+ mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);
+
+ /* wait for adb power request ack */
+ while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK)) {
+ ;
+ }
+ }
+
+ /* handle the ADB400 sync */
+ if (pwr_domain->need_sync) {
+ /* set adb power down request */
+ mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
+
+ /* wait for adb power request ack */
+ while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
+ ;
+ }
+ }
+
+ if (domain_id == GPUMIX) {
+ /* power down GPU2D */
+ mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
+
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ) {
+ ;
+ }
+
+ /* power down GPU3D */
+ mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
+
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ) {
+ ;
+ }
+ }
+
+ /* HSIOMIX has no PU bit, so skip for it */
+ if (domain_id != HSIOMIX) {
+ /* set the PGC bit */
+ mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
+
+ /* power down the domain */
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) {
+ ;
+ }
+ }
+ }
+}
+
void imx_gpc_init(void)
{
unsigned int val;
@@ -50,7 +337,7 @@ void imx_gpc_init(void)
/*
* Set the CORE & SCU power up timing:
* SW = 0x1, SW2ISO = 0x1;
- * the CPU CORE and SCU power up timming counter
+ * the CPU CORE and SCU power up timing counter
* is drived by 32K OSC, each domain's power up
* latency is (SW + SW2ISO) / 32768
*/
@@ -85,7 +372,4 @@ void imx_gpc_init(void)
*/
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
-
- /* enable all the power domain by default */
- mmio_write_32(IMX_GPC_BASE + PU_PGC_UP_TRG, 0x3fcf);
}
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
index 937774c433..c39dd93e7f 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
+++ b/plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c
@@ -88,7 +88,7 @@ void bl2_el3_early_platform_setup(u_register_t arg1, u_register_t arg2,
imx8mm_usdhc_setup();
/* Open handles to a FIP image */
- plat_imx8mm_io_setup();
+ plat_imx_io_setup();
}
void bl2_el3_plat_arch_setup(void)
diff --git a/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c
index 40110d778b..bff8fb4e88 100644
--- a/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019-2022 ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,19 +18,38 @@
#include <drivers/generic_delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>
-#include <lib/xlat_tables/xlat_tables.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
+#include <dram.h>
#include <gpc.h>
#include <imx_aipstz.h>
#include <imx_uart.h>
#include <imx_rdc.h>
#include <imx8m_caam.h>
+#include <imx8m_ccm.h>
+#include <imx8m_csu.h>
+#include <imx8m_snvs.h>
#include <plat_imx8.h>
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
+/*
+ * Note: DRAM region is mapped with entire size available and uses MT_RW
+ * attributes.
+ * See details in docs/plat/imx8m.rst "High Assurance Boot (HABv4)" section
+ * for explanation of this mapping scheme.
+ */
static const mmap_region_t imx_mmap[] = {
MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW),
MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW), /* AIPS map */
+ MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_DEVICE | MT_RW), /* OCRAM_S */
+ MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW), /* DDRMIX */
+ MAP_REGION_FLAT(IMX_VPUMIX_BASE, IMX_VPUMIX_SIZE, MT_DEVICE | MT_RW), /* VPUMIX */
+	MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW), /* CAAM RAM */
+ MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW), /* NS OCRAM */
+ MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO), /* ROM code */
+ MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS), /* DRAM */
{0},
};
@@ -44,9 +63,11 @@ static const struct aipstz_cfg aipstz[] = {
static const struct imx_rdc_cfg rdc[] = {
/* Master domain assignment */
- RDC_MDAn(0x1, DID1),
+ RDC_MDAn(RDC_MDA_M4, DID1),
/* peripherals domain permission */
+ RDC_PDAPn(RDC_PDAP_UART4, D1R | D1W),
+ RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W),
/* memory region */
@@ -54,6 +75,40 @@ static const struct imx_rdc_cfg rdc[] = {
{0},
};
+static const struct imx_csu_cfg csu_cfg[] = {
+ /* peripherals csl setting */
+ CSU_CSLx(CSU_CSL_RDC, CSU_SEC_LEVEL_3, LOCKED),
+ CSU_CSLx(CSU_CSL_TZASC, CSU_SEC_LEVEL_5, LOCKED),
+ CSU_CSLx(CSU_CSL_CSU, CSU_SEC_LEVEL_5, LOCKED),
+
+ /* master HP0~1 */
+
+ /* SA setting */
+ CSU_SA(CSU_SA_M4, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_PCIE_CTRL1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USB1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USB2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_VPU, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_GPU, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_APBHDMA, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ENET, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC3, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_HUGO, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_DAP, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA3, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_LCDIF, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_CSI, NON_SEC_ACCESS, LOCKED),
+
+ /* HP control setting */
+
+ /* Sentinel */
+ {0}
+};
+
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
@@ -97,6 +152,7 @@ void bl31_tzc380_setup(void)
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
+ unsigned int console_base = IMX_BOOT_UART_BASE;
static console_t console;
int i;
@@ -109,13 +165,19 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
imx_rdc_init(rdc);
- imx8m_caam_init();
+ imx_csu_init(csu_cfg);
+
+ if (console_base == 0U) {
+ console_base = imx8m_uart_get_base();
+ }
- console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ,
IMX_CONSOLE_BAUDRATE, &console);
/* This console is only used for boot stage */
console_set_scope(&console, CONSOLE_FLAG_BOOT);
+ imx8m_caam_init();
+
/*
* tell BL3-1 where the non-secure software image is located
* and the entry state information.
@@ -124,7 +186,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-#ifdef SPD_opteed
+#if defined(SPD_opteed) || defined(SPD_trusty)
/* Populate entry point information for BL32 */
SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
@@ -134,26 +196,51 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/* Pass TEE base and size to bl33 */
bl33_image_ep_info.args.arg1 = BL32_BASE;
bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+#ifdef SPD_trusty
+ bl32_image_ep_info.args.arg0 = BL32_SIZE;
+ bl32_image_ep_info.args.arg1 = BL32_BASE;
+#else
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+#endif
+
+#if !defined(SPD_opteed) && !defined(SPD_trusty)
+ enable_snvs_privileged_access();
#endif
bl31_tzc380_setup();
}
+#define MAP_BL31_TOTAL \
+ MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO \
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+#define MAP_COHERENT_MEM \
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \
+ MT_DEVICE | MT_RW | MT_SECURE)
+#define MAP_BL32_TOTAL \
+ MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW)
+
void bl31_plat_arch_setup(void)
{
- mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
- MT_MEMORY | MT_RW | MT_SECURE);
- mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE),
- MT_MEMORY | MT_RO | MT_SECURE);
+ const mmap_region_t bl_regions[] = {
+ MAP_BL31_TOTAL,
+ MAP_BL31_RO,
#if USE_COHERENT_MEM
- mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
- (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
- MT_DEVICE | MT_RW | MT_SECURE);
+ MAP_COHERENT_MEM,
#endif
- mmap_add(imx_mmap);
-
- init_xlat_tables();
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ /* Map TEE memory */
+ MAP_BL32_TOTAL,
+#endif
+ {0}
+ };
+ setup_page_tables(bl_regions, imx_mmap);
enable_mmu_el3(0);
}
@@ -164,6 +251,9 @@ void bl31_platform_setup(void)
/* select the CKIL source to 32K OSC */
mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+ /* Init the dram info */
+ dram_info_init(SAVED_DRAM_TIMING_BASE);
+
plat_gic_driver_init();
plat_gic_init();
@@ -184,3 +274,12 @@ unsigned int plat_get_syscnt_freq2(void)
{
return COUNTER_FREQUENCY;
}
+
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ args->arg0 = BL32_SIZE;
+ args->arg1 = BL32_BASE;
+ args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
+}
+#endif
diff --git a/plat/imx/imx8m/imx8mm/include/imx8mm_private.h b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
index 52d13f031f..5e0ef972f8 100644
--- a/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
+++ b/plat/imx/imx8m/imx8mm/include/imx8mm_private.h
@@ -10,6 +10,6 @@
/*******************************************************************************
* Function and variable prototypes
******************************************************************************/
-void plat_imx8mm_io_setup(void);
+void plat_imx_io_setup(void);
#endif /* IMX8MM_PRIVATE_H */
diff --git a/plat/imx/imx8m/imx8mm/include/imx_sec_def.h b/plat/imx/imx8m/imx8mm/include/imx_sec_def.h
new file mode 100644
index 0000000000..d53c922d37
--- /dev/null
+++ b/plat/imx/imx8m/imx8mm/include/imx_sec_def.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_SEC_DEF_H
+#define IMX_SEC_DEF_H
+
+/* RDC MDA index */
+enum rdc_mda_idx {
+ RDC_MDA_A53 = 0,
+ RDC_MDA_M4 = 1,
+ RDC_MDA_PCIE_CTRL1 = 2,
+ RDC_MDA_SDMA3p = 3,
+ RDC_MDA_VPU_Decoders = 4,
+ RDC_MDA_LCDIF = 5,
+ RDC_MDA_CSI1 = 6,
+ RDC_MDA_SDMA3b = 7,
+ RDC_MDA_Coresight = 8,
+ RDC_MDA_DAP = 9,
+ RDC_MDA_CAAM = 10,
+ RDC_MDA_SDMA1p = 11,
+ RDC_MDA_SDMA1b = 12,
+ RDC_MDA_APBHDMA = 13,
+ RDC_MDA_NAND = 14,
+ RDC_MDA_uSDHC1 = 15,
+ RDC_MDA_uSDHC2 = 16,
+ RDC_MDA_uSDHC3 = 17,
+ RDC_MDA_GPU = 18,
+ RDC_MDA_USB1 = 19,
+ RDC_MDA_USB2 = 20,
+ RDC_MDA_TESTPORT = 21,
+ RDC_MDA_ENET1_TX = 22,
+ RDC_MDA_ENET1_RX = 23,
+ RDC_MDA_SDMA2p = 24,
+ RDC_MDA_SDMA2b = 24,
+ RDC_MDA_SDMA2_to_SPBA2 = 24,
+ RDC_MDA_SDMA3_to_SPBA2 = 25,
+ RDC_MDA_SDMA1_to_SPBA1 = 26,
+};
+
+/* RDC Peripherals index */
+enum rdc_pdap_idx {
+ RDC_PDAP_GPIO2 = 1,
+ RDC_PDAP_GPIO3 = 2,
+ RDC_PDAP_GPIO4 = 3,
+ RDC_PDAP_GPIO5 = 4,
+ RDC_PDAP_ANA_TSENSOR = 6,
+ RDC_PDAP_ANA_OSC = 7,
+ RDC_PDAP_WDOG1 = 8,
+ RDC_PDAP_WDOG2 = 9,
+ RDC_PDAP_WDOG3 = 10,
+ RDC_PDAP_SDMA3 = 11,
+ RDC_PDAP_SDMA2 = 12,
+ RDC_PDAP_GPT1 = 13,
+ RDC_PDAP_GPT2 = 14,
+ RDC_PDAP_GPT3 = 15,
+ RDC_PDAP_ROMCP = 17,
+ RDC_PDAP_IOMUXC = 19,
+ RDC_PDAP_IOMUXC_GPR = 20,
+ RDC_PDAP_OCOTP_CTRL = 21,
+ RDC_PDAP_ANA_PLL = 22,
+ RDC_PDAP_SNVS_HP = 23,
+ RDC_PDAP_CCM = 24,
+ RDC_PDAP_SRC = 25,
+ RDC_PDAP_GPC = 26,
+ RDC_PDAP_SEMAPHORE1 = 27,
+ RDC_PDAP_SEMAPHORE2 = 28,
+ RDC_PDAP_RDC = 29,
+ RDC_PDAP_CSU = 30,
+ RDC_PDAP_LCDIF = 32,
+ RDC_PDAP_MIPI_DSI = 33,
+ RDC_PDAP_CSI = 34,
+ RDC_PDAP_MIPI_CSI = 35,
+ RDC_PDAP_USB1 = 36,
+ RDC_PDAP_PWM1 = 38,
+ RDC_PDAP_PWM2 = 39,
+ RDC_PDAP_PWM3 = 40,
+ RDC_PDAP_PWM4 = 41,
+ RDC_PDAP_System_Counter_RD = 42,
+ RDC_PDAP_System_Counter_CMP = 43,
+ RDC_PDAP_System_Counter_CTRL = 44,
+ RDC_PDAP_GPT6 = 46,
+ RDC_PDAP_GPT5 = 47,
+ RDC_PDAP_GPT4 = 48,
+ RDC_PDAP_TZASC = 56,
+ RDC_PDAP_USB2 = 59,
+ RDC_PDAP_PERFMON1 = 60,
+ RDC_PDAP_PERFMON2 = 61,
+ RDC_PDAP_PLATFORM_CTRL = 62,
+ RDC_PDAP_QoSC = 63,
+ RDC_PDAP_I2C1 = 66,
+ RDC_PDAP_I2C2 = 67,
+ RDC_PDAP_I2C3 = 68,
+ RDC_PDAP_I2C4 = 69,
+ RDC_PDAP_UART4 = 70,
+ RDC_PDAP_MU_A = 74,
+ RDC_PDAP_MU_B = 75,
+ RDC_PDAP_SEMAPHORE_HS = 76,
+ RDC_PDAP_SAI1 = 78,
+ RDC_PDAP_SAI2 = 79,
+ RDC_PDAP_SAI3 = 80,
+ RDC_PDAP_SAI5 = 82,
+ RDC_PDAP_SAI6 = 83,
+ RDC_PDAP_uSDHC1 = 84,
+ RDC_PDAP_uSDHC2 = 85,
+ RDC_PDAP_uSDHC3 = 86,
+ RDC_PDAP_PCIE_PHY1 = 88,
+ RDC_PDAP_SPBA2 = 90,
+ RDC_PDAP_QSPI = 91,
+ RDC_PDAP_SDMA1 = 93,
+ RDC_PDAP_ENET1 = 94,
+ RDC_PDAP_SPDIF1 = 97,
+ RDC_PDAP_eCSPI1 = 98,
+ RDC_PDAP_eCSPI2 = 99,
+ RDC_PDAP_eCSPI3 = 100,
+ RDC_PDAP_MICFIL = 101,
+ RDC_PDAP_UART1 = 102,
+ RDC_PDAP_UART3 = 104,
+ RDC_PDAP_UART2 = 105,
+ RDC_PDAP_SPDIF2 = 106,
+ RDC_PDAP_SPBA1 = 111,
+ RDC_PDAP_CAAM = 114,
+};
+
+enum csu_csl_idx {
+ CSU_CSL_GPIO1 = 0,
+ CSU_CSL_GPIO2 = 1,
+ CSU_CSL_GPIO3 = 2,
+ CSU_CSL_GPIO4 = 3,
+ CSU_CSL_GPIO5 = 4,
+ CSU_CSL_ANA_TSENSOR = 6,
+ CSU_CSL_ANA_OSC = 7,
+ CSU_CSL_WDOG1 = 8,
+ CSU_CSL_WDOG2 = 9,
+ CSU_CSL_WDOG3 = 10,
+ CSU_CSL_SDMA2 = 12,
+ CSU_CSL_GPT1 = 13,
+ CSU_CSL_GPT2 = 14,
+ CSU_CSL_GPT3 = 15,
+ CSU_CSL_ROMCP = 17,
+ CSU_CSL_LCDIF = 18,
+ CSU_CSL_IOMUXC = 19,
+ CSU_CSL_IOMUXC_GPR = 20,
+ CSU_CSL_OCOTP_CTRL = 21,
+ CSU_CSL_ANA_PLL = 22,
+ CSU_CSL_SNVS_HP = 23,
+ CSU_CSL_CCM = 24,
+ CSU_CSL_SRC = 25,
+ CSU_CSL_GPC = 26,
+ CSU_CSL_SEMAPHORE1 = 27,
+ CSU_CSL_SEMAPHORE2 = 28,
+ CSU_CSL_RDC = 29,
+ CSU_CSL_CSU = 30,
+ CSU_CSL_DC_MST0 = 32,
+ CSU_CSL_DC_MST1 = 33,
+ CSU_CSL_DC_MST2 = 34,
+ CSU_CSL_DC_MST3 = 35,
+ CSU_CSL_PWM1 = 38,
+ CSU_CSL_PWM2 = 39,
+ CSU_CSL_PWM3 = 40,
+ CSU_CSL_PWM4 = 41,
+ CSU_CSL_System_Counter_RD = 42,
+ CSU_CSL_System_Counter_CMP = 43,
+ CSU_CSL_System_Counter_CTRL = 44,
+ CSU_CSL_GPT6 = 46,
+ CSU_CSL_GPT5 = 47,
+ CSU_CSL_GPT4 = 48,
+ CSU_CSL_TZASC = 56,
+ CSU_CSL_MTR = 59,
+ CSU_CSL_PERFMON1 = 60,
+ CSU_CSL_PERFMON2 = 61,
+ CSU_CSL_PLATFORM_CTRL = 62,
+ CSU_CSL_QoSC = 63,
+ CSU_CSL_MIPI_PHY = 64,
+ CSU_CSL_MIPI_DSI = 65,
+ CSU_CSL_I2C1 = 66,
+ CSU_CSL_I2C2 = 67,
+ CSU_CSL_I2C3 = 68,
+ CSU_CSL_I2C4 = 69,
+ CSU_CSL_UART4 = 70,
+ CSU_CSL_MIPI_CSI1 = 71,
+ CSU_CSL_MIPI_CSI_PHY1 = 72,
+ CSU_CSL_CSI1 = 73,
+ CSU_CSL_MU_A = 74,
+ CSU_CSL_MU_B = 75,
+ CSU_CSL_SEMAPHORE_HS = 76,
+ CSU_CSL_SAI1 = 78,
+ CSU_CSL_SAI6 = 80,
+ CSU_CSL_SAI5 = 81,
+ CSU_CSL_SAI4 = 82,
+ CSU_CSL_uSDHC1 = 84,
+ CSU_CSL_uSDHC2 = 85,
+ CSU_CSL_MIPI_CSI2 = 86,
+ CSU_CSL_MIPI_CSI_PHY2 = 87,
+ CSU_CSL_CSI2 = 88,
+ CSU_CSL_SPBA2 = 90,
+ CSU_CSL_QSPI = 91,
+ CSU_CSL_SDMA1 = 93,
+ CSU_CSL_ENET1 = 94,
+ CSU_CSL_SPDIF1 = 97,
+ CSU_CSL_eCSPI1 = 98,
+ CSU_CSL_eCSPI2 = 99,
+ CSU_CSL_eCSPI3 = 100,
+ CSU_CSL_UART1 = 102,
+ CSU_CSL_UART3 = 104,
+ CSU_CSL_UART2 = 105,
+ CSU_CSL_SPDIF2 = 106,
+ CSU_CSL_SAI2 = 107,
+ CSU_CSL_SAI3 = 108,
+ CSU_CSL_SPBA1 = 111,
+ CSU_CSL_CAAM = 114,
+};
+
+enum csu_sa_idx {
+ CSU_SA_M4 = 1,
+ CSU_SA_SDMA1 = 2,
+ CSU_SA_PCIE_CTRL1 = 3,
+ CSU_SA_USB1 = 4,
+ CSU_SA_USB2 = 5,
+ CSU_SA_VPU = 6,
+ CSU_SA_GPU = 7,
+ CSU_SA_APBHDMA = 8,
+ CSU_SA_ENET = 9,
+ CSU_SA_USDHC1 = 10,
+ CSU_SA_USDHC2 = 11,
+ CSU_SA_USDHC3 = 12,
+ CSU_SA_HUGO = 13,
+ CSU_SA_DAP = 14,
+ CSU_SA_SDMA2 = 15,
+ CSU_SA_CAAM = 16,
+ CSU_SA_SDMA3 = 17,
+ CSU_SA_LCDIF = 18,
+ CSU_SA_CSI = 19,
+};
+
+#endif /* IMX_SEC_DEF_H */
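The enums above give the RDC master (MDA), RDC peripheral (PDAP) and CSU CSL/SA slots symbolic names, so board tables read as RDC_PDAP_UART4 rather than a bare 70. A short illustration of how they are consumed, mirroring the rdc[]/csu_cfg[] tables added in the BL31 setup changes later in this diff (the specific assignments are examples only):

    /* Illustrative only: hand UART4 to domain 1 (the Cortex-M core) and
     * lock the TZASC CSL slot, as the later setup tables in this change do. */
    static const struct imx_rdc_cfg rdc_example[] = {
            RDC_MDAn(RDC_MDA_M4, DID1),            /* M4 master -> domain 1 */
            RDC_PDAPn(RDC_PDAP_UART4, D1R | D1W),  /* UART4 R/W for domain 1 */
            {0},                                   /* sentinel */
    };

    static const struct imx_csu_cfg csu_example[] = {
            CSU_CSLx(CSU_CSL_TZASC, CSU_SEC_LEVEL_5, LOCKED),
            {0},                                   /* sentinel */
    };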
diff --git a/plat/imx/imx8m/imx8mm/include/platform_def.h b/plat/imx/imx8m/imx8mm/include/platform_def.h
index f8efa5659f..349233abc1 100644
--- a/plat/imx/imx8m/imx8mm/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mm/include/platform_def.h
@@ -1,10 +1,13 @@
/*
- * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch.h>
#include <common/tbbr/tbbr_img_def.h>
+#include <lib/utils_def.h>
+#include <plat/common/common_def.h>
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
@@ -38,25 +41,31 @@
#if defined(NEED_BL2)
#define BL2_BASE U(0x920000)
-#define BL2_LIMIT U(0x940000)
+#define BL2_SIZE SZ_128K
+#define BL2_LIMIT (BL2_BASE + BL2_SIZE)
#define BL31_BASE U(0x900000)
-#define BL31_LIMIT U(0x920000)
-#define IMX8MM_FIP_BASE U(0x40310000)
-#define IMX8MM_FIP_SIZE U(0x000200000)
-#define IMX8MM_FIP_LIMIT U(FIP_BASE + FIP_SIZE)
+#define IMX_FIP_BASE U(0x40310000)
+#define IMX_FIP_SIZE U(0x000300000)
+#define IMX_FIP_LIMIT U(FIP_BASE + FIP_SIZE)
/* Define FIP image location on eMMC */
-#define IMX8MM_FIP_MMC_BASE U(0x100000)
+#define IMX_FIP_MMC_BASE U(0x100000)
#define PLAT_IMX8MM_BOOT_MMC_BASE U(0x30B50000) /* SD */
#else
#define BL31_BASE U(0x920000)
-#define BL31_LIMIT U(0x940000)
#endif
+#define BL31_SIZE SZ_128K
+#define BL31_LIMIT (BL31_BASE + BL31_SIZE)
+
/* non-secure uboot base */
+#ifndef PLAT_NS_IMAGE_OFFSET
#define PLAT_NS_IMAGE_OFFSET U(0x40200000)
-#define PLAT_NS_IMAGE_SIZE U(0x00100000)
+#endif
+#define PLAT_NS_IMAGE_SIZE U(0x00200000)
+
+#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000)
/* GICv3 base address */
#define PLAT_GICD_BASE U(0x38800000)
@@ -82,7 +91,7 @@
#define IMX_AIPSTZ4 U(0x32df0000)
#define IMX_AIPS_BASE U(0x30000000)
-#define IMX_AIPS_SIZE U(0xC00000)
+#define IMX_AIPS_SIZE U(0x3000000)
#define IMX_GPV_BASE U(0x32000000)
#define IMX_GPV_SIZE U(0x800000)
#define IMX_AIPS1_BASE U(0x30200000)
@@ -102,7 +111,17 @@
#define IMX_DDRC_BASE U(0x3d400000)
#define IMX_DDRPHY_BASE U(0x3c000000)
#define IMX_DDR_IPS_BASE U(0x3d000000)
+#define IMX_DDR_IPS_SIZE U(0x1800000)
+#define IMX_VPUMIX_BASE U(0x38330000)
+#define IMX_VPUMIX_SIZE U(0x100000)
#define IMX_ROM_BASE U(0x0)
+#define IMX_ROM_SIZE U(0x40000)
+#define IMX_NS_OCRAM_BASE U(0x900000)
+#define IMX_NS_OCRAM_SIZE U(0x20000)
+#define IMX_CAAM_RAM_BASE U(0x100000)
+#define IMX_CAAM_RAM_SIZE U(0x10000)
+#define IMX_DRAM_BASE U(0x40000000)
+#define IMX_DRAM_SIZE U(0xc0000000)
#define GPV_BASE U(0x32000000)
#define GPV_SIZE U(0x800000)
@@ -137,12 +156,14 @@
#define GPR_TZASC_EN_LOCK BIT(16)
#define ANAMIX_MISC_CTL U(0x124)
+#define DRAM_PLL_CTRL (IMX_ANAMIX_BASE + 0x50)
#define MAX_CSU_NUM U(64)
#define OCRAM_S_BASE U(0x00180000)
#define OCRAM_S_SIZE U(0x8000)
#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE)
+#define SAVED_DRAM_TIMING_BASE OCRAM_S_BASE
#define COUNTER_FREQUENCY 8000000 /* 8MHz */
@@ -151,3 +172,7 @@
#define MAX_IO_HANDLES 3U
#define MAX_IO_DEVICES 2U
#define MAX_IO_BLOCK_DEVICES 1U
+
+#define PLAT_IMX8M_DTO_BASE 0x53000000
+#define PLAT_IMX8M_DTO_MAX_SIZE 0x1000
+#define PLAT_IMX_EVENT_LOG_MAX_SIZE UL(0x400)
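BL32_FDT_OVERLAY_ADDR above sits a fixed 0x3000000 (48 MiB) above the non-secure image. With the default PLAT_NS_IMAGE_OFFSET that works out to 0x43200000; a trivial compile-time check of the arithmetic (sketch only, assuming the default values defined above):

    /* Sketch only, assuming the default PLAT_NS_IMAGE_OFFSET of 0x40200000. */
    _Static_assert((0x40200000UL + 0x3000000UL) == 0x43200000UL,
                   "BL32 FDT overlay lands 48 MiB above the BL33 load address");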
diff --git a/plat/imx/imx8m/imx8mm/platform.mk b/plat/imx/imx8m/imx8mm/platform.mk
index 186323393f..6136820874 100644
--- a/plat/imx/imx8m/imx8mm/platform.mk
+++ b/plat/imx/imx8m/imx8mm/platform.mk
@@ -1,18 +1,30 @@
#
-# Copyright (c) 2019-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019-2023, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+#
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
PLAT_INCLUDES := -Iplat/imx/common/include \
-Iplat/imx/imx8m/include \
-Iplat/imx/imx8m/imx8mm/include \
-Idrivers/imx/usdhc \
- -Iinclude/common/tbbr
+ -Iinclude/common/tbbr \
+ -Iinclude/lib/libfdt
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
+include lib/libfdt/libfdt.mk
+
+IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \
+ plat/imx/imx8m/ddr/clock.c \
+ plat/imx/imx8m/ddr/dram_retention.c \
+ plat/imx/imx8m/ddr/ddr4_dvfs.c \
+ plat/imx/imx8m/ddr/lpddr4_dvfs.c
+
IMX_GIC_SOURCES := ${GICV3_SOURCES} \
plat/common/plat_gicv3.c \
plat/common/plat_psci_common.c \
@@ -20,10 +32,14 @@ IMX_GIC_SOURCES := ${GICV3_SOURCES} \
BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/imx8m/gpc_common.c \
+ plat/imx/imx8m/imx_hab.c \
plat/imx/imx8m/imx_aipstz.c \
plat/imx/imx8m/imx_rdc.c \
+ plat/imx/imx8m/imx8m_csu.c \
plat/imx/imx8m/imx8m_caam.c \
+ plat/imx/imx8m/imx8m_ccm.c \
plat/imx/imx8m/imx8m_psci_common.c \
+ plat/imx/imx8m/imx8m_snvs.c \
plat/imx/imx8m/imx8mm/imx8mm_bl31_setup.c \
plat/imx/imx8m/imx8mm/imx8mm_psci.c \
plat/imx/imx8m/imx8mm/gpc.c \
@@ -31,18 +47,17 @@ BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/common/imx_sip_handler.c \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_uart_console.S \
- plat/imx/common/imx_ehf.c \
- plat/imx/common/imx_sdei.c \
- lib/xlat_tables/aarch64/xlat_tables.c \
- lib/xlat_tables/xlat_tables_common.c \
lib/cpus/aarch64/cortex_a53.S \
drivers/arm/tzc/tzc380.c \
drivers/delay_timer/delay_timer.c \
drivers/delay_timer/generic_delay_timer.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ ${IMX_DRAM_SOURCES} \
${IMX_GIC_SOURCES}
ifeq (${NEED_BL2},yes)
BL2_SOURCES += common/desc_image_load.c \
+ common/fdt_wrappers.c \
plat/imx/common/imx8_helpers.S \
plat/imx/common/imx_uart_console.S \
plat/imx/imx8m/imx8mm/imx8mm_bl2_el3_setup.c \
@@ -63,8 +78,8 @@ BL2_SOURCES += common/desc_image_load.c \
drivers/io/io_storage.c \
drivers/imx/usdhc/imx_usdhc.c \
plat/imx/imx8m/imx8mm/imx8mm_bl2_mem_params_desc.c \
- plat/imx/imx8m/imx8mm/imx8mm_io_storage.c \
- plat/imx/imx8m/imx8mm/imx8mm_image_load.c \
+ plat/imx/common/imx_io_storage.c \
+ plat/imx/imx8m/imx8m_image_load.c \
lib/optee/optee_utils.c
endif
@@ -88,7 +103,7 @@ ifeq (${NEED_BL2},yes)
$(eval $(call add_define,NEED_BL2))
LOAD_IMAGE_V2 := 1
# Non-TF Boot ROM
-BL2_AT_EL3 := 1
+RESET_TO_BL2 := 1
endif
ifneq (${TRUSTED_BOARD_BOOT},0)
@@ -120,15 +135,16 @@ certificates: $(ROT_KEY)
$(ROT_KEY): | $(BUILD_PLAT)
@echo " OPENSSL $@"
@if [ ! -f $(ROT_KEY) ]; then \
- openssl genrsa 2048 > $@ 2>/dev/null; \
+ ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \
fi
$(ROTPK_HASH): $(ROT_KEY)
@echo " OPENSSL $@"
- $(Q)openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
- openssl dgst -sha256 -binary > $@ 2>/dev/null
+ $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+ ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null
endif
+ENABLE_PIE := 1
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
@@ -137,6 +153,10 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+ifneq (${PRELOADED_BL33_BASE},)
+$(eval $(call add_define_val,PLAT_NS_IMAGE_OFFSET,${PRELOADED_BL33_BASE}))
+endif
+
BL32_BASE ?= 0xbe000000
$(eval $(call add_define,BL32_BASE))
@@ -144,7 +164,31 @@ BL32_SIZE ?= 0x2000000
$(eval $(call add_define,BL32_SIZE))
IMX_BOOT_UART_BASE ?= 0x30890000
+ifeq (${IMX_BOOT_UART_BASE},auto)
+ override IMX_BOOT_UART_BASE := 0
+endif
$(eval $(call add_define,IMX_BOOT_UART_BASE))
-EL3_EXCEPTION_HANDLING := 1
-SDEI_SUPPORT := 1
+EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT)
+ifeq (${SDEI_SUPPORT}, 1)
+BL31_SOURCES += plat/imx/common/imx_ehf.c \
+ plat/imx/common/imx_sdei.c
+endif
+
+ifeq (${MEASURED_BOOT},1)
+ MEASURED_BOOT_MK := drivers/measured_boot/event_log/event_log.mk
+ $(info Including ${MEASURED_BOOT_MK})
+ include ${MEASURED_BOOT_MK}
+
+ifneq (${MBOOT_EL_HASH_ALG}, sha256)
+ $(eval $(call add_define,TF_MBEDTLS_MBOOT_USE_SHA512))
+endif
+
+BL2_SOURCES += plat/imx/imx8m/imx8m_measured_boot.c \
+ plat/imx/imx8m/imx8m_dyn_cfg_helpers.c \
+ ${EVENT_LOG_SOURCES}
+endif
+
+ifeq (${SPD},trusty)
+ BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1
+endif
diff --git a/plat/imx/imx8m/imx8mn/gpc.c b/plat/imx/imx8m/imx8mn/gpc.c
index 37d4226a6f..20c9a55610 100644
--- a/plat/imx/imx8m/imx8mn/gpc.c
+++ b/plat/imx/imx8m/imx8mn/gpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2020 NXP
+ * Copyright 2019-2022 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -21,6 +21,124 @@
#define CCGR(x) (0x4000 + (x) * 0x10)
+#define MIPI_PWR_REQ BIT(0)
+#define OTG1_PWR_REQ BIT(2)
+#define HSIOMIX_PWR_REQ BIT(4)
+#define GPUMIX_PWR_REQ BIT(7)
+#define DISPMIX_PWR_REQ BIT(10)
+
+#define HSIOMIX_ADB400_SYNC BIT(5)
+#define DISPMIX_ADB400_SYNC BIT(7)
+#define GPUMIX_ADB400_SYNC (0x5 << 9)
+#define HSIOMIX_ADB400_ACK BIT(23)
+#define DISPMIX_ADB400_ACK BIT(25)
+#define GPUMIX_ADB400_ACK (0x5 << 27)
+
+#define MIPI_PGC 0xc00
+#define OTG1_PGC 0xc80
+#define HSIOMIX_PGC 0xd00
+#define GPUMIX_PGC 0xdc0
+#define DISPMIX_PGC 0xe80
+
+enum pu_domain_id {
+ HSIOMIX,
+ OTG1 = 2,
+ GPUMIX = 4,
+ DISPMIX = 9,
+ MIPI,
+};
+
+/* PU domain, add some hole to minimize the uboot change */
+static struct imx_pwr_domain pu_domains[11] = {
+ [HSIOMIX] = IMX_MIX_DOMAIN(HSIOMIX, false),
+ [OTG1] = IMX_PD_DOMAIN(OTG1, true),
+ [GPUMIX] = IMX_MIX_DOMAIN(GPUMIX, false),
+ [DISPMIX] = IMX_MIX_DOMAIN(DISPMIX, false),
+ [MIPI] = IMX_PD_DOMAIN(MIPI, true),
+};
+
+static unsigned int pu_domain_status;
+
+void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
+{
+ if (domain_id > MIPI) {
+ return;
+ }
+
+ struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];
+
+ if (on) {
+ if (pwr_domain->need_sync) {
+ pu_domain_status |= (1 << domain_id);
+ }
+
+ /* HSIOMIX has no PU bit, so skip for it */
+ if (domain_id != HSIOMIX) {
+ /* clear the PGC bit */
+ mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
+
+ /* power up the domain */
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req) {
+ ;
+ }
+ }
+
+ if (domain_id == DISPMIX) {
+ /* de-reset bus_blk clk and
+ * enable bus_blk clk
+ */
+ mmio_write_32(0x32e28000, 0x100);
+ mmio_write_32(0x32e28004, 0x100);
+ }
+
+ /* handle the ADB400 sync */
+ if (pwr_domain->need_sync) {
+ /* clear adb power down request */
+ mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
+
+ /* wait for adb power request ack */
+ while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
+ ;
+ }
+ }
+ } else {
+ pu_domain_status &= ~(1 << domain_id);
+
+ if (domain_id == OTG1) {
+ return;
+ }
+
+ /* handle the ADB400 sync */
+ if (pwr_domain->need_sync) {
+
+ /* set adb power down request */
+ mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);
+
+ /* wait for adb power request ack */
+ while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack)) {
+ ;
+ }
+ }
+
+ /* HSIOMIX has no PU bit, so skip for it */
+ if (domain_id != HSIOMIX) {
+ /* set the PGC bit */
+ mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);
+
+ /* power down the domain */
+ mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);
+
+ /* wait for power request done */
+ while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req) {
+ ;
+ }
+ }
+ }
+}
+
void imx_gpc_init(void)
{
unsigned int val;
@@ -52,7 +170,7 @@ void imx_gpc_init(void)
/*
* Set the CORE & SCU power up timing:
* SW = 0x1, SW2ISO = 0x1;
- * the CPU CORE and SCU power up timming counter
+ * the CPU CORE and SCU power up timing counter
* is driven by 32K OSC, each domain's power up
* latency is (SW + SW2ISO) / 32768
*/
@@ -86,9 +204,4 @@ void imx_gpc_init(void)
* only need to do it once.
*/
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
-
- /* enable all the power domain by default */
- for (i = 0; i < 103; i++)
- mmio_write_32(IMX_CCM_BASE + CCGR(i), 0x3);
- mmio_write_32(IMX_GPC_BASE + PU_PGC_UP_TRG, 0x485);
}
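imx_gpc_pm_domain_enable() is exported here so other BL31 code (for instance the SiP handler that services power-domain requests) can bring a MIX domain up before its registers are touched. A hypothetical caller, for illustration only:

    #include <stdbool.h>
    #include <gpc.h>

    /* Hypothetical helper: power DISPMIX up, touch it, power it back down. */
    static void dispmix_access_example(void)
    {
            imx_gpc_pm_domain_enable(DISPMIX, true);   /* PGC power-up + ADB400 sync */
            /* ... access DISPMIX registers here ... */
            imx_gpc_pm_domain_enable(DISPMIX, false);  /* reverse handshake, power down */
    }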
diff --git a/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c b/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c
index d4705eeeb4..f9e430bf96 100644
--- a/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2020 NXP
+ * Copyright 2019-2022 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,16 +19,24 @@
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
+#include <dram.h>
#include <gpc.h>
#include <imx_aipstz.h>
#include <imx_uart.h>
#include <imx_rdc.h>
#include <imx8m_caam.h>
+#include <imx8m_ccm.h>
+#include <imx8m_csu.h>
+#include <imx8m_snvs.h>
#include <platform_def.h>
#include <plat_imx8.h>
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
static const mmap_region_t imx_mmap[] = {
- GIC_MAP, AIPS_MAP, OCRAM_S_MAP, DDRC_MAP, {0},
+ GIC_MAP, AIPS_MAP, OCRAM_S_MAP, DDRC_MAP,
+ CAAM_RAM_MAP, NS_OCRAM_MAP, ROM_MAP, DRAM_MAP,
+ {0},
};
static const struct aipstz_cfg aipstz[] = {
@@ -41,9 +49,11 @@ static const struct aipstz_cfg aipstz[] = {
static const struct imx_rdc_cfg rdc[] = {
/* Master domain assignment */
- RDC_MDAn(0x1, DID1),
+ RDC_MDAn(RDC_MDA_M7, DID1),
/* peripherals domain permission */
+ RDC_PDAPn(RDC_PDAP_UART4, D1R | D1W),
+ RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W),
/* memory region */
RDC_MEM_REGIONn(16, 0x0, 0x0, 0xff),
@@ -54,6 +64,22 @@ static const struct imx_rdc_cfg rdc[] = {
{0},
};
+static const struct imx_csu_cfg csu_cfg[] = {
+ /* peripherals csl setting */
+ CSU_CSLx(CSU_CSL_OCRAM, CSU_SEC_LEVEL_2, UNLOCKED),
+ CSU_CSLx(CSU_CSL_OCRAM_S, CSU_SEC_LEVEL_2, UNLOCKED),
+
+ /* master HP0~1 */
+
+ /* SA setting */
+
+ /* HP control setting */
+
+ /* Sentinel */
+ {0}
+};
+
+
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
@@ -97,7 +123,9 @@ static void bl31_tzc380_setup(void)
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
+ unsigned int console_base = IMX_BOOT_UART_BASE;
static console_t console;
+ unsigned int val;
int i;
/* Enable CSU NS access permission */
@@ -109,13 +137,31 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
imx_rdc_init(rdc);
- imx8m_caam_init();
+ imx_csu_init(csu_cfg);
+
+ /*
+ * Configure the force_incr programmable bit in GPV_5 of PL301_display, which fixes
+ * a partial write issue. The AXI2AHB bridge is used for masters that access the TCM
+ * through system bus. Please refer to errata ERR050362 for more information.
+ */
+ mmio_setbits_32((GPV5_BASE_ADDR + FORCE_INCR_OFFSET), FORCE_INCR_BIT_MASK);
+
+ /* config the ocram memory range for secure access */
+ mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, 0x4c1);
+ val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x2c);
+ mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, val | 0x3DFF0000);
- console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ if (console_base == 0U) {
+ console_base = imx8m_uart_get_base();
+ }
+
+ console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ,
IMX_CONSOLE_BAUDRATE, &console);
/* This console is only used for boot stage */
console_set_scope(&console, CONSOLE_FLAG_BOOT);
+ imx8m_caam_init();
+
/*
* tell BL3-1 where the non-secure software image is located
* and the entry state information.
@@ -124,7 +170,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-#ifdef SPD_opteed
+#if defined(SPD_opteed) || defined(SPD_trusty)
/* Populate entry point information for BL32 */
SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
@@ -134,26 +180,51 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/* Pass TEE base and size to bl33 */
bl33_image_ep_info.args.arg1 = BL32_BASE;
bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+#ifdef SPD_trusty
+ bl32_image_ep_info.args.arg0 = BL32_SIZE;
+ bl32_image_ep_info.args.arg1 = BL32_BASE;
+#else
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+#endif
+
+#if !defined(SPD_opteed) && !defined(SPD_trusty)
+ enable_snvs_privileged_access();
#endif
bl31_tzc380_setup();
}
+#define MAP_BL31_TOTAL \
+ MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO \
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+#define MAP_COHERENT_MEM \
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \
+ MT_DEVICE | MT_RW | MT_SECURE)
+#define MAP_BL32_TOTAL \
+ MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW)
+
void bl31_plat_arch_setup(void)
{
- mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
- MT_MEMORY | MT_RW | MT_SECURE);
- mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE),
- MT_MEMORY | MT_RO | MT_SECURE);
+ const mmap_region_t bl_regions[] = {
+ MAP_BL31_TOTAL,
+ MAP_BL31_RO,
#if USE_COHERENT_MEM
- mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
- (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
- MT_DEVICE | MT_RW | MT_SECURE);
+ MAP_COHERENT_MEM,
#endif
- mmap_add(imx_mmap);
-
- init_xlat_tables();
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ /* Map TEE memory */
+ MAP_BL32_TOTAL,
+#endif
+ {0}
+ };
+ setup_page_tables(bl_regions, imx_mmap);
enable_mmu_el3(0);
}
@@ -164,6 +235,9 @@ void bl31_platform_setup(void)
/* select the CKIL source to 32K OSC */
mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+ /* Init the dram info */
+ dram_info_init(SAVED_DRAM_TIMING_BASE);
+
plat_gic_driver_init();
plat_gic_init();
@@ -184,3 +258,12 @@ unsigned int plat_get_syscnt_freq2(void)
{
return COUNTER_FREQUENCY;
}
+
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ args->arg0 = BL32_SIZE;
+ args->arg1 = BL32_BASE;
+ args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
+}
+#endif
diff --git a/plat/imx/imx8m/imx8mn/include/imx_sec_def.h b/plat/imx/imx8m/imx8mn/include/imx_sec_def.h
new file mode 100644
index 0000000000..83c5fa9513
--- /dev/null
+++ b/plat/imx/imx8m/imx8mn/include/imx_sec_def.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_SEC_DEF_H
+#define IMX_SEC_DEF_H
+
+/* RDC MDA index */
+enum rdc_mda_idx {
+ RDC_MDA_A53 = 0,
+ RDC_MDA_M7 = 1,
+ RDC_MDA_SDMA3p = 3,
+ RDC_MDA_LCDIF = 5,
+ RDC_MDA_ISI = 6,
+ RDC_MDA_SDMA3b = 7,
+ RDC_MDA_Coresight = 8,
+ RDC_MDA_DAP = 9,
+ RDC_MDA_CAAM = 10,
+ RDC_MDA_SDMA1p = 11,
+ RDC_MDA_SDMA1b = 12,
+ RDC_MDA_APBHDMA = 13,
+ RDC_MDA_RAWNAND = 14,
+ RDC_MDA_uSDHC1 = 15,
+ RDC_MDA_uSDHC2 = 16,
+ RDC_MDA_uSDHC3 = 17,
+ RDC_MDA_GPU = 18,
+ RDC_MDA_USB1 = 19,
+ RDC_MDA_TESTPORT = 21,
+ RDC_MDA_ENET1_TX = 22,
+ RDC_MDA_ENET1_RX = 23,
+ RDC_MDA_SDMA2 = 24,
+};
+
+/* RDC Peripherals index */
+enum rdc_pdap_idx {
+ RDC_PDAP_GPIO1 = 0,
+ RDC_PDAP_GPIO2 = 1,
+ RDC_PDAP_GPIO3 = 2,
+ RDC_PDAP_GPIO4 = 3,
+ RDC_PDAP_GPIO5 = 4,
+ RDC_PDAP_ANA_TSENSOR = 6,
+ RDC_PDAP_ANA_OSC = 7,
+ RDC_PDAP_WDOG1 = 8,
+ RDC_PDAP_WDOG2 = 9,
+ RDC_PDAP_WDOG3 = 10,
+ RDC_PDAP_SDMA3 = 11,
+ RDC_PDAP_SDMA2 = 12,
+ RDC_PDAP_GPT1 = 13,
+ RDC_PDAP_GPT2 = 14,
+ RDC_PDAP_GPT3 = 15,
+ RDC_PDAP_ROMCP = 17,
+ RDC_PDAP_IOMUXC = 19,
+ RDC_PDAP_IOMUXC_GPR = 20,
+ RDC_PDAP_OCOTP_CTRL = 21,
+ RDC_PDAP_ANA_PLL = 22,
+ RDC_PDAP_SNVS_HP = 23,
+ RDC_PDAP_CCM = 24,
+ RDC_PDAP_SRC = 25,
+ RDC_PDAP_GPC = 26,
+ RDC_PDAP_SEMAPHORE1 = 27,
+ RDC_PDAP_SEMAPHORE2 = 28,
+ RDC_PDAP_RDC = 29,
+ RDC_PDAP_CSU = 30,
+ RDC_PDAP_LCDIF = 32,
+ RDC_PDAP_MIPI_DSI = 33,
+ RDC_PDAP_ISI = 34,
+ RDC_PDAP_MIPI_CSI = 35,
+ RDC_PDAP_USB1 = 36,
+ RDC_PDAP_PWM1 = 38,
+ RDC_PDAP_PWM2 = 39,
+ RDC_PDAP_PWM3 = 40,
+ RDC_PDAP_PWM4 = 41,
+ RDC_PDAP_System_Counter_RD = 42,
+ RDC_PDAP_System_Counter_CMP = 43,
+ RDC_PDAP_System_Counter_CTRL = 44,
+ RDC_PDAP_GPT6 = 46,
+ RDC_PDAP_GPT5 = 47,
+ RDC_PDAP_GPT4 = 48,
+ RDC_PDAP_TZASC = 56,
+ RDC_PDAP_PERFMON1 = 60,
+ RDC_PDAP_PERFMON2 = 61,
+ RDC_PDAP_PLATFORM_CTRL = 62,
+ RDC_PDAP_QoSC = 63,
+ RDC_PDAP_I2C1 = 66,
+ RDC_PDAP_I2C2 = 67,
+ RDC_PDAP_I2C3 = 68,
+ RDC_PDAP_I2C4 = 69,
+ RDC_PDAP_UART4 = 70,
+ RDC_PDAP_MU_A = 74,
+ RDC_PDAP_MU_B = 75,
+ RDC_PDAP_SEMAPHORE_HS = 76,
+ RDC_PDAP_SAI2 = 79,
+ RDC_PDAP_SAI3 = 80,
+ RDC_PDAP_SAI5 = 82,
+ RDC_PDAP_SAI6 = 83,
+ RDC_PDAP_uSDHC1 = 84,
+ RDC_PDAP_uSDHC2 = 85,
+ RDC_PDAP_uSDHC3 = 86,
+ RDC_PDAP_SAI7 = 87,
+ RDC_PDAP_SPBA2 = 90,
+ RDC_PDAP_QSPI = 91,
+ RDC_PDAP_SDMA1 = 93,
+ RDC_PDAP_ENET1 = 94,
+ RDC_PDAP_SPDIF1 = 97,
+ RDC_PDAP_eCSPI1 = 98,
+ RDC_PDAP_eCSPI2 = 99,
+ RDC_PDAP_eCSPI3 = 100,
+ RDC_PDAP_MICFIL = 101,
+ RDC_PDAP_UART1 = 102,
+ RDC_PDAP_UART3 = 104,
+ RDC_PDAP_UART2 = 105,
+ RDC_PDAP_ASRC = 107,
+ RDC_PDAP_SPBA1 = 111,
+ RDC_PDAP_CAAM = 114,
+};
+
+enum csu_csl_idx {
+ CSU_CSL_GPIO1 = 0,
+ CSU_CSL_GPIO2 = 1,
+ CSU_CSL_GPIO3 = 2,
+ CSU_CSL_GPIO4 = 3,
+ CSU_CSL_GPIO5 = 4,
+ CSU_CSL_ANA_TSENSOR = 6,
+ CSU_CSL_ANA_OSC = 7,
+ CSU_CSL_WDOG1 = 8,
+ CSU_CSL_WDOG2 = 9,
+ CSU_CSL_WDOG3 = 10,
+ CSU_CSL_SDMA2 = 12,
+ CSU_CSL_GPT1 = 13,
+ CSU_CSL_GPT2 = 14,
+ CSU_CSL_GPT3 = 15,
+ CSU_CSL_ROMCP = 17,
+ CSU_CSL_LCDIF = 18,
+ CSU_CSL_IOMUXC = 19,
+ CSU_CSL_IOMUXC_GPR = 20,
+ CSU_CSL_OCOTP_CTRL = 21,
+ CSU_CSL_ANA_PLL = 22,
+ CSU_CSL_SNVS_HP = 23,
+ CSU_CSL_CCM = 24,
+ CSU_CSL_SRC = 25,
+ CSU_CSL_GPC = 26,
+ CSU_CSL_SEMAPHORE1 = 27,
+ CSU_CSL_SEMAPHORE2 = 28,
+ CSU_CSL_RDC = 29,
+ CSU_CSL_CSU = 30,
+ CSU_CSL_DC_MST0 = 32,
+ CSU_CSL_DC_MST1 = 33,
+ CSU_CSL_DC_MST2 = 34,
+ CSU_CSL_DC_MST3 = 35,
+ CSU_CSL_PWM1 = 38,
+ CSU_CSL_PWM2 = 39,
+ CSU_CSL_PWM3 = 40,
+ CSU_CSL_PWM4 = 41,
+ CSU_CSL_System_Counter_RD = 42,
+ CSU_CSL_System_Counter_CMP = 43,
+ CSU_CSL_System_Counter_CTRL = 44,
+ CSU_CSL_GPT6 = 46,
+ CSU_CSL_GPT5 = 47,
+ CSU_CSL_GPT4 = 48,
+ CSU_CSL_TZASC = 56,
+ CSU_CSL_MTR = 59,
+ CSU_CSL_PERFMON1 = 60,
+ CSU_CSL_PERFMON2 = 61,
+ CSU_CSL_PLATFORM_CTRL = 62,
+ CSU_CSL_QoSC = 63,
+ CSU_CSL_MIPI_PHY = 64,
+ CSU_CSL_MIPI_DSI = 65,
+ CSU_CSL_I2C1 = 66,
+ CSU_CSL_I2C2 = 67,
+ CSU_CSL_I2C3 = 68,
+ CSU_CSL_I2C4 = 69,
+ CSU_CSL_UART4 = 70,
+ CSU_CSL_MIPI_CSI1 = 71,
+ CSU_CSL_MIPI_CSI_PHY1 = 72,
+ CSU_CSL_CSI1 = 73,
+ CSU_CSL_MU_A = 74,
+ CSU_CSL_MU_B = 75,
+ CSU_CSL_SEMAPHORE_HS = 76,
+ CSU_CSL_SAI1 = 78,
+ CSU_CSL_SAI6 = 80,
+ CSU_CSL_SAI5 = 81,
+ CSU_CSL_SAI4 = 82,
+ CSU_CSL_uSDHC1 = 84,
+ CSU_CSL_uSDHC2 = 85,
+ CSU_CSL_MIPI_CSI2 = 86,
+ CSU_CSL_MIPI_CSI_PHY2 = 87,
+ CSU_CSL_CSI2 = 88,
+ CSU_CSL_SPBA2 = 90,
+ CSU_CSL_QSPI = 91,
+ CSU_CSL_SDMA1 = 93,
+ CSU_CSL_ENET1 = 94,
+ CSU_CSL_SPDIF1 = 97,
+ CSU_CSL_eCSPI1 = 98,
+ CSU_CSL_eCSPI2 = 99,
+ CSU_CSL_eCSPI3 = 100,
+ CSU_CSL_UART1 = 102,
+ CSU_CSL_UART3 = 104,
+ CSU_CSL_UART2 = 105,
+ CSU_CSL_SPDIF2 = 106,
+ CSU_CSL_SAI2 = 107,
+ CSU_CSL_SAI3 = 108,
+ CSU_CSL_SPBA1 = 111,
+ CSU_CSL_CAAM = 114,
+ CSU_CSL_OCRAM = 118,
+ CSU_CSL_OCRAM_S = 119,
+};
+
+enum csu_sa_idx {
+ CSU_SA_M7 = 1,
+ CSU_SA_SDMA1 = 2,
+ CSU_SA_USB1 = 4,
+ CSU_SA_GPU = 7,
+ CSU_SA_APBHDMA = 8,
+ CSU_SA_ENET1 = 9,
+ CSU_SA_USDHC1 = 10,
+ CSU_SA_USDHC2 = 11,
+ CSU_SA_USDHC3 = 12,
+ CSU_SA_HUGO = 13,
+ CSU_SA_DAP = 14,
+ CSU_SA_SDMA2 = 15,
+ CSU_SA_CAAM = 16,
+ CSU_SA_SDMA3 = 17,
+ CSU_SA_LCDIF = 18,
+ CSU_SA_ISI = 19,
+};
+
+#endif /* IMX_SEC_DEF_H */
diff --git a/plat/imx/imx8m/imx8mn/include/platform_def.h b/plat/imx/imx8m/imx8mn/include/platform_def.h
index 9c46d8d278..8e7be98ec7 100644
--- a/plat/imx/imx8m/imx8mn/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mn/include/platform_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 NXP
+ * Copyright 2020-2022 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,6 +8,7 @@
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
@@ -40,10 +41,15 @@
#define PLAT_SDEI_SGI_PRIVATE U(9)
#define BL31_BASE U(0x960000)
-#define BL31_LIMIT U(0x980000)
+#define BL31_SIZE SZ_128K
+#define BL31_LIMIT (BL31_BASE + BL31_SIZE)
/* non-secure uboot base */
+#ifndef PLAT_NS_IMAGE_OFFSET
#define PLAT_NS_IMAGE_OFFSET U(0x40200000)
+#endif
+
+#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000)
/* GICv3 base address */
#define PLAT_GICD_BASE U(0x38800000)
@@ -68,7 +74,7 @@
#define IMX_AIPSTZ4 U(0x32df0000)
#define IMX_AIPS_BASE U(0x30000000)
-#define IMX_AIPS_SIZE U(0xC00000)
+#define IMX_AIPS_SIZE U(0x3000000)
#define IMX_GPV_BASE U(0x32000000)
#define IMX_GPV_SIZE U(0x800000)
#define IMX_AIPS1_BASE U(0x30200000)
@@ -90,6 +96,13 @@
#define IMX_DDR_IPS_BASE U(0x3d000000)
#define IMX_DDR_IPS_SIZE U(0x1800000)
#define IMX_ROM_BASE U(0x0)
+#define IMX_ROM_SIZE U(0x40000)
+#define IMX_NS_OCRAM_BASE U(0x900000)
+#define IMX_NS_OCRAM_SIZE U(0x60000)
+#define IMX_CAAM_RAM_BASE U(0x100000)
+#define IMX_CAAM_RAM_SIZE U(0x10000)
+#define IMX_DRAM_BASE U(0x40000000)
+#define IMX_DRAM_SIZE U(0xc0000000)
#define IMX_GIC_BASE PLAT_GICD_BASE
#define IMX_GIC_SIZE U(0x200000)
@@ -130,11 +143,26 @@
#define COUNTER_FREQUENCY 8000000 /* 8MHz */
+#define GPV5_BASE_ADDR U(0x32500000)
+#define FORCE_INCR_OFFSET U(0x4044)
+#define FORCE_INCR_BIT_MASK U(0x2)
+
#define IMX_WDOG_B_RESET
#define GIC_MAP MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW)
#define AIPS_MAP MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW) /* AIPS map */
#define OCRAM_S_MAP MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_DEVICE | MT_RW) /* OCRAM_S */
#define DDRC_MAP MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW) /* DDRMIX */
+#define CAAM_RAM_MAP MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW) /* CAAM RAM */
+#define NS_OCRAM_MAP MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW) /* NS OCRAM */
+#define ROM_MAP MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO) /* ROM code */
+
+/*
+ * Note: DRAM region is mapped with entire size available and uses MT_RW
+ * attributes.
+ * See details in docs/plat/imx8m.rst "High Assurance Boot (HABv4)" section
+ * for explanation of this mapping scheme.
+ */
+#define DRAM_MAP MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS) /* DRAM */
#endif /* platform_def.h */
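The DRAM window added above covers the whole 32-bit DDR aperture: IMX_DRAM_BASE (0x40000000) plus IMX_DRAM_SIZE (0xc0000000) ends exactly at the 4 GiB boundary, i.e. all 3 GiB of the DDR space is mapped RW and non-secure, presumably so the HAB authentication path can reach images anywhere in DRAM (the referenced imx8m.rst section has the full rationale). A quick check of the arithmetic, sketch only:

    _Static_assert((0x40000000ULL + 0xc0000000ULL) == 0x100000000ULL,
                   "DRAM map ends exactly at the 4 GiB boundary");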
diff --git a/plat/imx/imx8m/imx8mn/platform.mk b/plat/imx/imx8m/imx8mn/platform.mk
index 208708918f..6036b6adf4 100644
--- a/plat/imx/imx8m/imx8mn/platform.mk
+++ b/plat/imx/imx8m/imx8mn/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright 2019-2020 NXP
+# Copyright 2019-2022 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -13,6 +13,13 @@ include lib/xlat_tables_v2/xlat_tables.mk
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
+IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \
+ plat/imx/imx8m/ddr/clock.c \
+ plat/imx/imx8m/ddr/dram_retention.c \
+ plat/imx/imx8m/ddr/ddr4_dvfs.c \
+ plat/imx/imx8m/ddr/lpddr4_dvfs.c
+
+
IMX_GIC_SOURCES := ${GICV3_SOURCES} \
plat/common/plat_gicv3.c \
plat/common/plat_psci_common.c \
@@ -20,10 +27,14 @@ IMX_GIC_SOURCES := ${GICV3_SOURCES} \
BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/imx8m/gpc_common.c \
+ plat/imx/imx8m/imx_hab.c \
plat/imx/imx8m/imx_aipstz.c \
plat/imx/imx8m/imx_rdc.c \
plat/imx/imx8m/imx8m_caam.c \
+ plat/imx/imx8m/imx8m_ccm.c \
+ plat/imx/imx8m/imx8m_csu.c \
plat/imx/imx8m/imx8m_psci_common.c \
+ plat/imx/imx8m/imx8m_snvs.c \
plat/imx/imx8m/imx8mn/imx8mn_bl31_setup.c \
plat/imx/imx8m/imx8mn/imx8mn_psci.c \
plat/imx/imx8m/imx8mn/gpc.c \
@@ -31,15 +42,15 @@ BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/common/imx_sip_handler.c \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_uart_console.S \
- plat/imx/common/imx_ehf.c \
- plat/imx/common/imx_sdei.c \
lib/cpus/aarch64/cortex_a53.S \
drivers/arm/tzc/tzc380.c \
drivers/delay_timer/delay_timer.c \
drivers/delay_timer/generic_delay_timer.c \
+ ${IMX_DRAM_SOURCES} \
${IMX_GIC_SOURCES} \
${XLAT_TABLES_LIB_SRCS}
+ENABLE_PIE := 1
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
@@ -48,6 +59,10 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+ifneq (${PRELOADED_BL33_BASE},)
+$(eval $(call add_define_val,PLAT_NS_IMAGE_OFFSET,${PRELOADED_BL33_BASE}))
+endif
+
BL32_BASE ?= 0xbe000000
$(eval $(call add_define,BL32_BASE))
@@ -55,7 +70,17 @@ BL32_SIZE ?= 0x2000000
$(eval $(call add_define,BL32_SIZE))
IMX_BOOT_UART_BASE ?= 0x30890000
+ifeq (${IMX_BOOT_UART_BASE},auto)
+ override IMX_BOOT_UART_BASE := 0
+endif
$(eval $(call add_define,IMX_BOOT_UART_BASE))
-EL3_EXCEPTION_HANDLING := 1
-SDEI_SUPPORT := 1
+EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT)
+ifeq (${SDEI_SUPPORT}, 1)
+BL31_SOURCES += plat/imx/common/imx_ehf.c \
+ plat/imx/common/imx_sdei.c
+endif
+
+ifeq (${SPD},trusty)
+ BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1
+endif
diff --git a/plat/imx/imx8m/imx8mp/gpc.c b/plat/imx/imx8m/imx8mp/gpc.c
index d660e3d885..a95eb3651a 100644
--- a/plat/imx/imx8m/imx8mp/gpc.c
+++ b/plat/imx/imx8m/imx8mp/gpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2020 NXP
+ * Copyright 2019-2022 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -69,10 +69,11 @@ enum pu_domain_id {
HDMIMIX,
HDMI_PHY,
DDRMIX,
+ MAX_DOMAINS,
};
/* PU domain, add some hole to minimize the uboot change */
-static struct imx_pwr_domain pu_domains[20] = {
+static struct imx_pwr_domain pu_domains[MAX_DOMAINS] = {
[MIPI_PHY1] = IMX_PD_DOMAIN(MIPI_PHY1, false),
[PCIE_PHY] = IMX_PD_DOMAIN(PCIE_PHY, false),
[USB1_PHY] = IMX_PD_DOMAIN(USB1_PHY, true),
@@ -169,11 +170,16 @@ static void imx_noc_qos(unsigned int domain_id)
}
}
-static void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
+void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
{
struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];
unsigned int i;
+ /* validate the domain id */
+ if (domain_id >= MAX_DOMAINS) {
+ return;
+ }
+
if (domain_id == HSIOMIX) {
for (i = 0; i < ARRAY_SIZE(hsiomix_clk); i++) {
hsiomix_clk[i].val = mmio_read_32(IMX_CCM_BASE + hsiomix_clk[i].offset);
@@ -331,7 +337,7 @@ void imx_gpc_init(void)
/*
* Set the CORE & SCU power up timing:
* SW = 0x1, SW2ISO = 0x1;
- * the CPU CORE and SCU power up timming counter
+ * the CPU CORE and SCU power up timing counter
* is driven by 32K OSC, each domain's power up
* latency is (SW + SW2ISO) / 32768
*/
@@ -368,12 +374,20 @@ void imx_gpc_init(void)
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
- /* enable all the power domain by default */
+ /* enable all clocks by default */
for (i = 0; i < 101; i++) {
mmio_write_32(IMX_CCM_BASE + CCGR(i), 0x3);
}
- for (i = 0; i < 20; i++) {
- imx_gpc_pm_domain_enable(i, true);
- }
+ /* Depending on the SKU, we may be lacking e.g. a VPU and shouldn't
+ * access that domain here, because that would lock up the SoC.
+ * Other i.MX8M variants don't initialize any power domains, but
+ * for 8MP we have been enabling the USB power domains since the
+ * beginning, and stopping to do so now may render systems
+ * unrecoverable. So we'll keep initializing just the USB power
+ * domains instead of all of them, as before.
+ */
+ imx_gpc_pm_domain_enable(HSIOMIX, true);
+ imx_gpc_pm_domain_enable(USB1_PHY, true);
+ imx_gpc_pm_domain_enable(USB2_PHY, true);
}
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c
new file mode 100644
index 0000000000..08cbeeb20e
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2021 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <context.h>
+#include <drivers/arm/tzc380.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/mmc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/optee_utils.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <imx8m_caam.h>
+#include "imx8mp_private.h"
+#include <imx_aipstz.h>
+#include <imx_rdc.h>
+#include <imx_uart.h>
+#include <plat/common/platform.h>
+#include <plat_imx8.h>
+#include <platform_def.h>
+
+
+static const struct aipstz_cfg aipstz[] = {
+ {IMX_AIPSTZ1, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+ {IMX_AIPSTZ2, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+ {IMX_AIPSTZ3, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+ {IMX_AIPSTZ4, 0x77777777, 0x77777777, .opacr = {0x0, 0x0, 0x0, 0x0, 0x0}, },
+ {0},
+};
+
+void bl2_el3_early_platform_setup(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+ unsigned int i;
+
+ /* Enable CSU NS access permission */
+ for (i = 0U; i < 64; i++) {
+ mmio_write_32(IMX_CSU_BASE + i * 4, 0x00ff00ff);
+ }
+
+ imx_aipstz_init(aipstz);
+
+ console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ IMX_CONSOLE_BAUDRATE, &console);
+
+ generic_delay_timer_init();
+
+ /* select the CKIL source to 32K OSC */
+ mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+
+ /* Open handles to a FIP image */
+ plat_imx_io_setup();
+}
+
+void bl2_el3_plat_arch_setup(void)
+{
+}
+
+void bl2_platform_setup(void)
+{
+}
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ int err = 0;
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+ bl_mem_params_node_t *pager_mem_params = NULL;
+ bl_mem_params_node_t *paged_mem_params = NULL;
+
+ assert(bl_mem_params);
+
+ switch (image_id) {
+ case BL32_IMAGE_ID:
+ pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
+ assert(pager_mem_params);
+
+ paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
+ assert(paged_mem_params);
+
+ err = parse_optee_header(&bl_mem_params->ep_info,
+ &pager_mem_params->image_info,
+ &paged_mem_params->image_info);
+ if (err != 0) {
+ WARN("OPTEE header parse error.\n");
+ }
+
+ break;
+ default:
+ /* Do nothing in default case */
+ break;
+ }
+
+ return err;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return COUNTER_FREQUENCY;
+}
+
+void bl2_plat_runtime_setup(void)
+{
+ return;
+}
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c
new file mode 100644
index 0000000000..f2f6808e8c
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <common/desc_image_load.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+ {
+ .image_id = BL31_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+ entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+ {
+ .image_id = BL32_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+ entry_point_info_t,
+ SECURE | EXECUTABLE),
+ .ep_info.pc = BL32_BASE,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+ image_info_t, 0),
+
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_SIZE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+ {
+ .image_id = BL32_EXTRA1_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+ entry_point_info_t,
+ SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, VERSION_2,
+ image_info_t, IMAGE_ATTRIB_SKIP_LOADING),
+ .image_info.image_base = BL32_BASE,
+ .image_info.image_max_size = BL32_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+ {
+ /* This is a zero sized image so we don't set base or size */
+ .image_id = BL32_EXTRA2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_SKIP_LOADING),
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, VERSION_2,
+ entry_point_info_t,
+ NON_SECURE | EXECUTABLE),
+ # ifdef PRELOADED_BL33_BASE
+ .ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_SKIP_LOADING),
+ # else
+ .ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = PLAT_NS_IMAGE_OFFSET,
+ .image_info.image_max_size = PLAT_NS_IMAGE_SIZE,
+ # endif /* PRELOADED_BL33_BASE */
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ }
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs);
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c b/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c
index 22fbd5e4b4..8e352198e9 100644
--- a/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 NXP
+ * Copyright 2020-2022 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -19,17 +19,25 @@
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
+#include <dram.h>
#include <gpc.h>
#include <imx_aipstz.h>
#include <imx_uart.h>
#include <imx_rdc.h>
#include <imx8m_caam.h>
+#include <imx8m_ccm.h>
+#include <imx8m_csu.h>
+#include <imx8m_snvs.h>
#include <platform_def.h>
#include <plat_imx8.h>
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
static const mmap_region_t imx_mmap[] = {
GIC_MAP, AIPS_MAP, OCRAM_S_MAP, DDRC_MAP,
- NOC_MAP, {0},
+ NOC_MAP, CAAM_RAM_MAP, NS_OCRAM_MAP,
+ ROM_MAP, DRAM_MAP,
+ {0},
};
static const struct aipstz_cfg aipstz[] = {
@@ -42,9 +50,10 @@ static const struct aipstz_cfg aipstz[] = {
static const struct imx_rdc_cfg rdc[] = {
/* Master domain assignment */
- RDC_MDAn(0x1, DID1),
+ RDC_MDAn(RDC_MDA_M7, DID1),
/* peripherals domain permission */
+ RDC_PDAPn(RDC_PDAP_UART2, D0R | D0W),
/* memory region */
@@ -52,6 +61,54 @@ static const struct imx_rdc_cfg rdc[] = {
{0},
};
+static const struct imx_csu_cfg csu_cfg[] = {
+ /* peripherals csl setting */
+ CSU_CSLx(CSU_CSL_OCRAM, CSU_SEC_LEVEL_2, LOCKED),
+ CSU_CSLx(CSU_CSL_OCRAM_S, CSU_SEC_LEVEL_2, LOCKED),
+ CSU_CSLx(CSU_CSL_RDC, CSU_SEC_LEVEL_3, LOCKED),
+ CSU_CSLx(CSU_CSL_TZASC, CSU_SEC_LEVEL_5, LOCKED),
+ CSU_CSLx(CSU_CSL_CSU, CSU_SEC_LEVEL_5, LOCKED),
+
+ /* master HP0~1 */
+
+ /* SA setting */
+ CSU_SA(CSU_SA_M7, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_PCIE_CTRL1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USB1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USB2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_APB_HDMA, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ENET1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_USDHC3, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_HUGO, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_DAP, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_SDMA3, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_LCDIF1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ISI, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_NPU, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_LCDIF2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_HDMI_TX, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ENET2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_GPU3D, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_GPU2D, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_VPU_G1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_VPU_G2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_VPU_VC8000E, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_AUDIO_EDMA, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ISP1, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_ISP2, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_DEWARP, NON_SEC_ACCESS, LOCKED),
+ CSU_SA(CSU_SA_GIC500, NON_SEC_ACCESS, LOCKED),
+
+ /* HP control setting */
+
+ /* Sentinel */
+ {0}
+};
+
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
@@ -95,7 +152,9 @@ static void bl31_tzc380_setup(void)
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
+ unsigned int console_base = IMX_BOOT_UART_BASE;
static console_t console;
+ unsigned int val;
unsigned int i;
/* Enable CSU NS access permission */
@@ -107,13 +166,24 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
imx_rdc_init(rdc);
- imx8m_caam_init();
+ imx_csu_init(csu_cfg);
+
+ /* config the ocram memory range for secure access */
+ mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, 0x4E1);
+ val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x2c);
+ mmio_write_32(IMX_IOMUX_GPR_BASE + 0x2c, val | 0x3DFF0000);
+
+ if (console_base == 0U) {
+ console_base = imx8m_uart_get_base();
+ }
- console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ console_imx_uart_register(console_base, IMX_BOOT_UART_CLK_IN_HZ,
IMX_CONSOLE_BAUDRATE, &console);
/* This console is only used for boot stage */
console_set_scope(&console, CONSOLE_FLAG_BOOT);
+ imx8m_caam_init();
+
/*
* tell BL3-1 where the non-secure software image is located
* and the entry state information.
@@ -122,7 +192,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-#ifdef SPD_opteed
+#if defined(SPD_opteed) || defined(SPD_trusty)
/* Populate entry point information for BL32 */
SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
@@ -132,26 +202,51 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/* Pass TEE base and size to bl33 */
bl33_image_ep_info.args.arg1 = BL32_BASE;
bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+#ifdef SPD_trusty
+ bl32_image_ep_info.args.arg0 = BL32_SIZE;
+ bl32_image_ep_info.args.arg1 = BL32_BASE;
+#else
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+#endif
+
+#if !defined(SPD_opteed) && !defined(SPD_trusty)
+ enable_snvs_privileged_access();
#endif
bl31_tzc380_setup();
}
+#define MAP_BL31_TOTAL \
+ MAP_REGION_FLAT(BL31_START, BL31_SIZE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO \
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+#define MAP_COHERENT_MEM \
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE, \
+ MT_DEVICE | MT_RW | MT_SECURE)
+#define MAP_BL32_TOTAL \
+ MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW)
+
void bl31_plat_arch_setup(void)
{
- mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
- MT_MEMORY | MT_RW | MT_SECURE);
- mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE),
- MT_MEMORY | MT_RO | MT_SECURE);
+ const mmap_region_t bl_regions[] = {
+ MAP_BL31_TOTAL,
+ MAP_BL31_RO,
#if USE_COHERENT_MEM
- mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
- (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE),
- MT_DEVICE | MT_RW | MT_SECURE);
+ MAP_COHERENT_MEM,
#endif
- mmap_add(imx_mmap);
-
- init_xlat_tables();
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ /* Map TEE memory */
+ MAP_BL32_TOTAL,
+#endif
+ {0}
+ };
+ setup_page_tables(bl_regions, imx_mmap);
enable_mmu_el3(0);
}
@@ -162,6 +257,9 @@ void bl31_platform_setup(void)
/* select the CKIL source to 32K OSC */
mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);
+ /* Init the dram info */
+ dram_info_init(SAVED_DRAM_TIMING_BASE);
+
plat_gic_driver_init();
plat_gic_init();
@@ -185,3 +283,12 @@ unsigned int plat_get_syscnt_freq2(void)
{
return COUNTER_FREQUENCY;
}
+
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ args->arg0 = BL32_SIZE;
+ args->arg1 = BL32_BASE;
+ args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
+}
+#endif
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
new file mode 100644
index 0000000000..a4c7ce150c
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ .global imx8mp_rotpk_hash
+ .global imx8mp_rotpk_hash_end
+imx8mp_rotpk_hash:
+ /* DER header */
+ .byte 0x30, 0x31, 0x30, 0x0D, 0x06, 0x09, 0x60, 0x86, 0x48
+ .byte 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20
+ /* SHA256 */
+ .incbin ROTPK_HASH
+imx8mp_rotpk_hash_end:
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c
new file mode 100644
index 0000000000..5d1a6c21bc
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <plat/common/platform.h>
+
+extern char imx8mp_rotpk_hash[], imx8mp_rotpk_hash_end[];
+
+int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
+ unsigned int *flags)
+{
+ *key_ptr = imx8mp_rotpk_hash;
+ *key_len = imx8mp_rotpk_hash_end - imx8mp_rotpk_hash;
+ *flags = ROTPK_IS_HASH;
+
+ return 0;
+}
+
+int plat_get_nv_ctr(void *cookie, unsigned int *nv_ctr)
+{
+ *nv_ctr = 0;
+
+ return 0;
+}
+
+int plat_set_nv_ctr(void *cookie, unsigned int nv_ctr)
+{
+ return 1;
+}
+
+int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size)
+{
+ return get_mbedtls_heap_helper(heap_addr, heap_size);
+}
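The 19 bytes emitted before the .incbin in imx8mp_rotpk.S are the standard DER DigestInfo prefix for SHA-256, so plat_get_rotpk_info() hands back a 19 + 32 = 51 byte blob flagged as ROTPK_IS_HASH. A hedged, consumer-side sanity check (illustrative only, not part of this change):

    #include <plat/common/platform.h>

    /* Illustrative only: fetch the ROTPK hash and check the expected length. */
    static int rotpk_length_check(void)
    {
            void *key;
            unsigned int len, flags;

            if (plat_get_rotpk_info(NULL, &key, &len, &flags) != 0) {
                    return -1;
            }

            /* 19-byte SHA-256 DigestInfo header + 32-byte digest. */
            return ((len == 51U) && ((flags & ROTPK_IS_HASH) != 0U)) ? 0 : -1;
    }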
diff --git a/plat/imx/imx8m/imx8mp/include/imx8mp_private.h b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h
new file mode 100644
index 0000000000..0a02334db1
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/include/imx8mp_private.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8MP_PRIVATE_H
+#define IMX8MP_PRIVATE_H
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+void plat_imx_io_setup(void);
+
+#endif /* IMX8MP_PRIVATE_H */
diff --git a/plat/imx/imx8m/imx8mp/include/imx_sec_def.h b/plat/imx/imx8m/imx8mp/include/imx_sec_def.h
new file mode 100644
index 0000000000..1ba30339ca
--- /dev/null
+++ b/plat/imx/imx8m/imx8mp/include/imx_sec_def.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_SEC_DEF_H
+#define IMX_SEC_DEF_H
+
+/* RDC MDA index */
+enum rdc_mda_idx {
+ RDC_MDA_A53 = 0,
+ RDC_MDA_M7 = 1,
+ RDC_MDA_PCIE_CTRL1 = 2,
+ RDC_MDA_SDMA3p = 3,
+ RDC_MDA_SDMA3b = 4,
+ RDC_MDA_LCDIF = 5,
+ RDC_MDA_ISI = 6,
+ RDC_MDA_NPU = 7,
+ RDC_MDA_Coresight = 8,
+ RDC_MDA_DAP = 9,
+ RDC_MDA_CAAM = 10,
+ RDC_MDA_SDMA1p = 11,
+ RDC_MDA_SDMA1b = 12,
+ RDC_MDA_APBHDMA = 13,
+ RDC_MDA_RAWNAND = 14,
+ RDC_MDA_uSDHC1 = 15,
+ RDC_MDA_uSDHC2 = 16,
+ RDC_MDA_uSDHC3 = 17,
+ RDC_MDA_AUDIO_PROCESSOR = 18,
+ RDC_MDA_USB1 = 19,
+ RDC_MDA_USB2 = 20,
+ RDC_MDA_TESTPORT = 21,
+ RDC_MDA_ENET1_TX = 22,
+ RDC_MDA_ENET1_RX = 23,
+ RDC_MDA_SDMA2 = 24,
+ RDC_MDA_SDMA3_to_SPBA2 = 25,
+ RDC_MDA_SDMA1_to_SPBA1 = 26,
+ RDC_MDA_LCDIF2 = 27,
+ RDC_MDA_HDMI_TX = 28,
+ RDC_MDA_ENET2 = 29,
+ RDC_MDA_GPU3D = 30,
+ RDC_MDA_GPU2D = 31,
+ RDC_MDA_VPU_G1 = 32,
+ RDC_MDA_VPU_G2 = 33,
+ RDC_MDA_VPU_VC8000E = 34,
+ RDC_MDA_AUDIO_EDMA = 35,
+ RDC_MDA_ISP1 = 36,
+ RDC_MDA_ISP2 = 37,
+ RDC_MDA_DEWARP = 38,
+ RDC_MDA_GIC500 = 39,
+};
+
+/* RDC Peripherals index */
+enum rdc_pdap_idx {
+ RDC_PDAP_GPIO1 = 0,
+ RDC_PDAP_GPIO2 = 1,
+ RDC_PDAP_GPIO3 = 2,
+ RDC_PDAP_GPIO4 = 3,
+ RDC_PDAP_GPIO5 = 4,
+ RDC_PDAP_MU_2_A = 5,
+ RDC_PDAP_ANA_TSENSOR = 6,
+ RDC_PDAP_ANA_OSC = 7,
+ RDC_PDAP_WDOG1 = 8,
+ RDC_PDAP_WDOG2 = 9,
+ RDC_PDAP_WDOG3 = 10,
+ RDC_PDAP_GPT1 = 13,
+ RDC_PDAP_GPT2 = 14,
+ RDC_PDAP_GPT3 = 15,
+ RDC_PDAP_MU_2_B = 16,
+ RDC_PDAP_ROMCP = 17,
+ RDC_PDAP_MU_3_A = 18,
+ RDC_PDAP_IOMUXC = 19,
+ RDC_PDAP_IOMUXC_GPR = 20,
+ RDC_PDAP_OCOTP_CTRL = 21,
+ RDC_PDAP_ANA_PLL = 22,
+ RDC_PDAP_SNVS_HP = 23,
+ RDC_PDAP_CCM = 24,
+ RDC_PDAP_SRC = 25,
+ RDC_PDAP_GPC = 26,
+ RDC_PDAP_SEMAPHORE1 = 27,
+ RDC_PDAP_SEMAPHORE2 = 28,
+ RDC_PDAP_RDC = 29,
+ RDC_PDAP_CSU = 30,
+ RDC_PDAP_MU_3_B = 31,
+ RDC_PDAP_ISI = 32,
+ RDC_PDAP_ISP0 = 33,
+ RDC_PDAP_ISP1 = 34,
+ RDC_PDAP_IPS_Dewarp = 35,
+ RDC_PDAP_MIPI_CSI0 = 36,
+ RDC_PDAP_HSIOMIX_BLK_CTL = 37,
+ RDC_PDAP_PWM1 = 38,
+ RDC_PDAP_PWM2 = 39,
+ RDC_PDAP_PWM3 = 40,
+ RDC_PDAP_PWM4 = 41,
+ RDC_PDAP_System_Counter_RD = 42,
+ RDC_PDAP_System_Counter_CMP = 43,
+ RDC_PDAP_System_Counter_CTRL = 44,
+ RDC_PDAP_I2C5 = 45,
+ RDC_PDAP_GPT6 = 46,
+ RDC_PDAP_GPT5 = 47,
+ RDC_PDAP_GPT4 = 48,
+ RDC_PDAP_MIPI_CSI1 = 49,
+ RDC_PDAP_MIPI_DSI0 = 50,
+ RDC_PDAP_MEDIAMIX_BLK_CTL = 51,
+ RDC_PDAP_LCDIF1 = 52,
+ RDC_PDAP_eDMA_Management_Page = 53,
+ RDC_PDAP_eDMA_Channels_15_0 = 54,
+ RDC_PDAP_eDMA_Channels_31_16 = 55,
+ RDC_PDAP_TZASC = 56,
+ RDC_PDAP_I2C6 = 57,
+ RDC_PDAP_CAAM = 58,
+ RDC_PDAP_LCDIF2 = 59,
+ RDC_PDAP_PERFMON1 = 60,
+ RDC_PDAP_PERFMON2 = 61,
+ RDC_PDAP_NOC_BLK_CTL = 62,
+ RDC_PDAP_QoSC = 63,
+ RDC_PDAP_LVDS0 = 64,
+ RDC_PDAP_LVDS1 = 65,
+ RDC_PDAP_I2C1 = 66,
+ RDC_PDAP_I2C2 = 67,
+ RDC_PDAP_I2C3 = 68,
+ RDC_PDAP_I2C4 = 69,
+ RDC_PDAP_UART4 = 70,
+ RDC_PDAP_HDMI_TX = 71,
+ RDC_PDAP_IRQ_STEER_Audio_Processor = 72,
+ RDC_PDAP_SDMA2 = 73,
+ RDC_PDAP_MU_1_A = 74,
+ RDC_PDAP_MU_1_B = 75,
+ RDC_PDAP_SEMAPHORE_HS = 76,
+ RDC_PDAP_SAI1 = 78,
+ RDC_PDAP_SAI2 = 79,
+ RDC_PDAP_SAI3 = 80,
+ RDC_PDAP_CAN_FD1 = 81,
+ RDC_PDAP_SAI5 = 82,
+ RDC_PDAP_SAI6 = 83,
+ RDC_PDAP_uSDHC1 = 84,
+ RDC_PDAP_uSDHC2 = 85,
+ RDC_PDAP_uSDHC3 = 86,
+ RDC_PDAP_PCIE_PHY1 = 87,
+ RDC_PDAP_HDMI_TX_AUDLNK_MSTR = 88,
+ RDC_PDAP_CAN_FD2 = 89,
+ RDC_PDAP_SPBA2 = 90,
+ RDC_PDAP_QSPI = 91,
+ RDC_PDAP_AUDIO_BLK_CTRL = 92,
+ RDC_PDAP_SDMA1 = 93,
+ RDC_PDAP_ENET1 = 94,
+ RDC_PDAP_ENET2_TSN = 95,
+ RDC_PDAP_ASRC = 97,
+ RDC_PDAP_eCSPI1 = 98,
+ RDC_PDAP_eCSPI2 = 99,
+ RDC_PDAP_eCSPI3 = 100,
+ RDC_PDAP_SAI7 = 101,
+ RDC_PDAP_UART1 = 102,
+ RDC_PDAP_UART3 = 104,
+ RDC_PDAP_UART2 = 105,
+ RDC_PDAP_PDM_MICFIL = 106,
+ RDC_PDAP_AUDIO_XCVR_RX_eARC = 107,
+ RDC_PDAP_SDMA3 = 109,
+ RDC_PDAP_SPBA1 = 111,
+};
+
+enum csu_csl_idx {
+ CSU_CSL_GPIO1 = 0,
+ CSU_CSL_GPIO2 = 1,
+ CSU_CSL_GPIO3 = 2,
+ CSU_CSL_GPIO4 = 3,
+ CSU_CSL_GPIO5 = 4,
+ CSU_CSL_MU_2_A = 5,
+ CSU_CSL_ANA_TSENSOR = 6,
+ CSU_CSL_ANA_OSC = 7,
+ CSU_CSL_WDOG1 = 8,
+ CSU_CSL_WDOG2 = 9,
+ CSU_CSL_WDOG3 = 10,
+ CSU_CSL_GPT1 = 13,
+ CSU_CSL_GPT2 = 14,
+ CSU_CSL_GPT3 = 15,
+ CSU_CSL_MU_2_B = 16,
+ CSU_CSL_ROMCP = 17,
+ CSU_CSL_MU_3_A = 18,
+ CSU_CSL_IOMUXC = 19,
+ CSU_CSL_IOMUXC_GPR = 20,
+ CSU_CSL_OCOTP_CTRL = 21,
+ CSU_CSL_ANA_PLL = 22,
+ CSU_CSL_SNVS_HP = 23,
+ CSU_CSL_CCM = 24,
+ CSU_CSL_SRC = 25,
+ CSU_CSL_GPC = 26,
+ CSU_CSL_SEMAPHORE1 = 27,
+ CSU_CSL_SEMAPHORE2 = 28,
+ CSU_CSL_RDC = 29,
+ CSU_CSL_CSU = 30,
+ CSU_CSL_MU_3_B = 31,
+ CSU_CSL_ISI = 32,
+ CSU_CSL_ISP0 = 33,
+ CSU_CSL_ISP1 = 34,
+ CSU_CSL_IPS_Dewarp = 35,
+ CSU_CSL_MIPI_CSI0 = 36,
+ CSU_CSL_HSIOMIX_BLK_CTL = 37,
+ CSU_CSL_PWM1 = 38,
+ CSU_CSL_PWM2 = 39,
+ CSU_CSL_PWM3 = 40,
+ CSU_CSL_PWM4 = 41,
+ CSU_CSL_System_Counter_RD = 42,
+ CSU_CSL_System_Counter_CMP = 43,
+ CSU_CSL_System_Counter_CTRL = 44,
+ CSU_CSL_I2C5 = 45,
+ CSU_CSL_GPT6 = 46,
+ CSU_CSL_GPT5 = 47,
+ CSU_CSL_GPT4 = 48,
+ CSU_CSL_MIPI_CSI1 = 49,
+ CSU_CSL_MIPI_DSI0 = 50,
+ CSU_CSL_MEDIAMIX_BLK_CTL = 51,
+ CSU_CSL_LCDIF1 = 52,
+ CSU_CSL_eDMA_Management_Page = 53,
+ CSU_CSL_eDMA_Channels_15_0 = 54,
+ CSU_CSL_eDMA_Channels_31_16 = 55,
+ CSU_CSL_TZASC = 56,
+ CSU_CSL_I2C6 = 57,
+ CSU_CSL_CAAM = 58,
+ CSU_CSL_LCDIF2 = 59,
+ CSU_CSL_PERFMON1 = 60,
+ CSU_CSL_PERFMON2 = 61,
+ CSU_CSL_NOC_BLK_CTL = 62,
+ CSU_CSL_QoSC = 63,
+ CSU_CSL_LVDS0 = 64,
+ CSU_CSL_LVDS1 = 65,
+ CSU_CSL_I2C1 = 66,
+ CSU_CSL_I2C2 = 67,
+ CSU_CSL_I2C3 = 68,
+ CSU_CSL_I2C4 = 69,
+ CSU_CSL_UART4 = 70,
+ CSU_CSL_HDMI_TX = 71,
+ CSU_CSL_IRQ_STEER_Audio_Processor = 72,
+ CSU_CSL_SDMA2 = 73,
+ CSU_CSL_MU_1_A = 74,
+ CSU_CSL_MU_1_B = 75,
+ CSU_CSL_SEMAPHORE_HS = 76,
+ CSU_CSL_SAI1 = 78,
+ CSU_CSL_SAI2 = 79,
+ CSU_CSL_SAI3 = 80,
+ CSU_CSL_CAN_FD1 = 81,
+ CSU_CSL_SAI5 = 82,
+ CSU_CSL_SAI6 = 83,
+ CSU_CSL_uSDHC1 = 84,
+ CSU_CSL_uSDHC2 = 85,
+ CSU_CSL_uSDHC3 = 86,
+ CSU_CSL_PCIE_PHY1 = 87,
+ CSU_CSL_HDMI_TX_AUDLNK_MSTR = 88,
+ CSU_CSL_CAN_FD2 = 89,
+ CSU_CSL_SPBA2 = 90,
+ CSU_CSL_QSPI = 91,
+ CSU_CSL_AUDIO_BLK_CTRL = 92,
+ CSU_CSL_SDMA1 = 93,
+ CSU_CSL_ENET1 = 94,
+ CSU_CSL_ENET2_TSN = 95,
+ CSU_CSL_ASRC = 97,
+ CSU_CSL_eCSPI1 = 98,
+ CSU_CSL_eCSPI2 = 99,
+ CSU_CSL_eCSPI3 = 100,
+ CSU_CSL_SAI7 = 101,
+ CSU_CSL_UART1 = 102,
+ CSU_CSL_UART3 = 104,
+ CSU_CSL_UART2 = 105,
+ CSU_CSL_PDM_MICFIL = 106,
+ CSU_CSL_AUDIO_XCVR_RX_eARC = 107,
+ CSU_CSL_SDMA3 = 109,
+ CSU_CSL_SPBA1 = 111,
+ CSU_CSL_OCRAM_A = 113,
+ CSU_CSL_OCRAM = 118,
+ CSU_CSL_OCRAM_S = 119,
+ CSU_CSL_VPU = 120,
+};
+
+enum csu_sa_idx {
+ CSU_SA_M7 = 1,
+ CSU_SA_SDMA1 = 2,
+ CSU_SA_PCIE_CTRL1 = 3,
+ CSU_SA_USB1 = 4,
+ CSU_SA_USB2 = 6,
+ CSU_SA_APB_HDMA = 8,
+ CSU_SA_ENET1 = 9,
+ CSU_SA_USDHC1 = 10,
+ CSU_SA_USDHC2 = 11,
+ CSU_SA_USDHC3 = 12,
+ CSU_SA_HUGO = 13,
+ CSU_SA_DAP = 14,
+ CSU_SA_SDMA2 = 15,
+ CSU_SA_CAAM = 16,
+ CSU_SA_SDMA3 = 17,
+ CSU_SA_LCDIF1 = 18,
+ CSU_SA_ISI = 19,
+ CSU_SA_NPU = 20,
+ CSU_SA_LCDIF2 = 21,
+ CSU_SA_HDMI_TX = 22,
+ CSU_SA_ENET2 = 23,
+ CSU_SA_GPU3D = 24,
+ CSU_SA_GPU2D = 25,
+ CSU_SA_VPU_G1 = 26,
+ CSU_SA_VPU_G2 = 27,
+ CSU_SA_VPU_VC8000E = 28,
+ CSU_SA_AUDIO_EDMA = 29,
+ CSU_SA_ISP1 = 30,
+ CSU_SA_ISP2 = 31,
+ CSU_SA_DEWARP = 32,
+ CSU_SA_GIC500 = 33,
+};
+
+#endif /* IMX_SEC_DEF_H */
diff --git a/plat/imx/imx8m/imx8mp/include/platform_def.h b/plat/imx/imx8m/imx8mp/include/platform_def.h
index 832bed17ed..4a03830460 100644
--- a/plat/imx/imx8m/imx8mp/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mp/include/platform_def.h
@@ -1,13 +1,15 @@
/*
- * Copyright 2020 NXP
+ * Copyright 2020-2023 NXP
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef PLATFORM_DEF_H
#define PLATFORM_DEF_H
+#include <common/tbbr/tbbr_img_def.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/common_def.h>
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
@@ -34,8 +36,25 @@
#define PLAT_WAIT_RET_STATE U(1)
#define PLAT_STOP_OFF_STATE U(3)
-#define BL31_BASE U(0x960000)
-#define BL31_LIMIT U(0x980000)
+#if defined(NEED_BL2)
+#define BL2_BASE U(0x970000)
+#define BL2_SIZE SZ_128K
+#define BL2_LIMIT (BL2_BASE + BL2_SIZE)
+#define BL31_BASE U(0x950000)
+#define IMX_FIP_BASE U(0x40310000)
+#define IMX_FIP_SIZE U(0x000300000)
+#define IMX_FIP_LIMIT			(IMX_FIP_BASE + IMX_FIP_SIZE)
+
+/* Define FIP image location on eMMC */
+#define IMX_FIP_MMC_BASE U(0x100000)
+
+#define PLAT_IMX8MP_BOOT_MMC_BASE U(0x30B50000) /* SD */
+#else
+#define BL31_BASE U(0x970000)
+#endif
+
+#define BL31_SIZE SZ_128K
+#define BL31_LIMIT (BL31_BASE + BL31_SIZE)
#define PLAT_PRI_BITS U(3)
#define PLAT_SDEI_CRITICAL_PRI 0x10
@@ -43,7 +62,12 @@
#define PLAT_SDEI_SGI_PRIVATE U(9)
/* non-secure uboot base */
+#ifndef PLAT_NS_IMAGE_OFFSET
#define PLAT_NS_IMAGE_OFFSET U(0x40200000)
+#endif
+#define PLAT_NS_IMAGE_SIZE U(0x00200000)
+
+#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000)
/* GICv3 base address */
#define PLAT_GICD_BASE U(0x38800000)
@@ -90,8 +114,15 @@
#define IMX_DDRC_BASE U(0x3d400000)
#define IMX_DDRPHY_BASE U(0x3c000000)
#define IMX_DDR_IPS_BASE U(0x3d000000)
-#define IMX_DDR_IPS_SIZE U(0x1800000)
+#define IMX_DDR_IPS_SIZE U(0x1900000)
#define IMX_ROM_BASE U(0x0)
+#define IMX_ROM_SIZE U(0x40000)
+#define IMX_NS_OCRAM_BASE U(0x900000)
+#define IMX_NS_OCRAM_SIZE U(0x60000)
+#define IMX_CAAM_RAM_BASE U(0x100000)
+#define IMX_CAAM_RAM_SIZE U(0x10000)
+#define IMX_DRAM_BASE U(0x40000000)
+#define IMX_DRAM_SIZE U(0xc0000000)
#define IMX_GIC_BASE PLAT_GICD_BASE
#define IMX_GIC_SIZE U(0x200000)
@@ -150,10 +181,25 @@
#define IMX_WDOG_B_RESET
+#define MAX_IO_HANDLES 3U
+#define MAX_IO_DEVICES 2U
+#define MAX_IO_BLOCK_DEVICES 1U
+
#define GIC_MAP MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW)
#define AIPS_MAP MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW) /* AIPS map */
#define OCRAM_S_MAP MAP_REGION_FLAT(OCRAM_S_BASE, OCRAM_S_SIZE, MT_MEMORY | MT_RW) /* OCRAM_S */
#define DDRC_MAP MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW) /* DDRMIX */
#define NOC_MAP MAP_REGION_FLAT(IMX_NOC_BASE, IMX_NOC_SIZE, MT_DEVICE | MT_RW) /* NOC QoS */
+#define CAAM_RAM_MAP	MAP_REGION_FLAT(IMX_CAAM_RAM_BASE, IMX_CAAM_RAM_SIZE, MT_MEMORY | MT_RW) /* CAAM RAM */
+#define NS_OCRAM_MAP MAP_REGION_FLAT(IMX_NS_OCRAM_BASE, IMX_NS_OCRAM_SIZE, MT_MEMORY | MT_RW) /* NS OCRAM */
+#define ROM_MAP MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO) /* ROM code */
+
+/*
+ * Note: the DRAM region is mapped with its entire size and MT_RW
+ * attributes.
+ * See the "High Assurance Boot (HABv4)" section in docs/plat/imx8m.rst
+ * for an explanation of this mapping scheme.
+ */
+#define DRAM_MAP MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS) /* DRAM */
#endif /* platform_def.h */
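As an aside, the NEED_BL2 branch above packs BL31 and BL2 back to back in OCRAM. A minimal illustrative check (not part of the patch; it only restates the values above and assumes SZ_128K is 0x20000) that compiles as a standalone C11 translation unit:

/* Illustrative only: restate the NEED_BL2 OCRAM layout from platform_def.h. */
#define SZ_128K        0x20000u                   /* assumed value of SZ_128K */
#define BL31_BASE      0x950000u
#define BL31_SIZE      SZ_128K
#define BL31_LIMIT     (BL31_BASE + BL31_SIZE)
#define BL2_BASE       0x970000u
#define BL2_SIZE       SZ_128K
#define BL2_LIMIT      (BL2_BASE + BL2_SIZE)

/* BL31 spans 0x950000-0x970000 and BL2 follows at 0x970000-0x990000. */
_Static_assert(BL31_LIMIT == BL2_BASE, "BL31 must end where BL2 begins");
_Static_assert(BL2_LIMIT == 0x990000u, "BL2 ends at 0x990000");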
diff --git a/plat/imx/imx8m/imx8mp/platform.mk b/plat/imx/imx8m/imx8mp/platform.mk
index 6be2f9861c..40764b151d 100644
--- a/plat/imx/imx8m/imx8mp/platform.mk
+++ b/plat/imx/imx8m/imx8mp/platform.mk
@@ -1,18 +1,26 @@
#
-# Copyright 2019-2020 NXP
+# Copyright 2019-2022 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
#
PLAT_INCLUDES := -Iplat/imx/common/include \
-Iplat/imx/imx8m/include \
- -Iplat/imx/imx8m/imx8mp/include
+ -Iplat/imx/imx8m/imx8mp/include \
+ -Idrivers/imx/usdhc \
+ -Iinclude/common/tbbr
# Translation tables library
include lib/xlat_tables_v2/xlat_tables.mk
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
+IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \
+ plat/imx/imx8m/ddr/clock.c \
+ plat/imx/imx8m/ddr/dram_retention.c \
+ plat/imx/imx8m/ddr/ddr4_dvfs.c \
+ plat/imx/imx8m/ddr/lpddr4_dvfs.c
+
IMX_GIC_SOURCES := ${GICV3_SOURCES} \
plat/common/plat_gicv3.c \
plat/common/plat_psci_common.c \
@@ -20,16 +28,18 @@ IMX_GIC_SOURCES := ${GICV3_SOURCES} \
BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/imx8m/gpc_common.c \
+ plat/imx/imx8m/imx_hab.c \
plat/imx/imx8m/imx_aipstz.c \
plat/imx/imx8m/imx_rdc.c \
plat/imx/imx8m/imx8m_caam.c \
+ plat/imx/imx8m/imx8m_ccm.c \
+ plat/imx/imx8m/imx8m_csu.c \
plat/imx/imx8m/imx8m_psci_common.c \
+ plat/imx/imx8m/imx8m_snvs.c \
plat/imx/imx8m/imx8mp/imx8mp_bl31_setup.c \
plat/imx/imx8m/imx8mp/imx8mp_psci.c \
plat/imx/imx8m/imx8mp/gpc.c \
plat/imx/common/imx8_topology.c \
- plat/imx/common/imx_ehf.c \
- plat/imx/common/imx_sdei.c \
plat/imx/common/imx_sip_handler.c \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_uart_console.S \
@@ -37,9 +47,101 @@ BL31_SOURCES += plat/imx/common/imx8_helpers.S \
drivers/arm/tzc/tzc380.c \
drivers/delay_timer/delay_timer.c \
drivers/delay_timer/generic_delay_timer.c \
+ ${IMX_DRAM_SOURCES} \
${IMX_GIC_SOURCES} \
${XLAT_TABLES_LIB_SRCS}
+ifeq (${NEED_BL2},yes)
+BL2_SOURCES += common/desc_image_load.c \
+ plat/imx/common/imx8_helpers.S \
+ plat/imx/common/imx_uart_console.S \
+ plat/imx/imx8m/imx8mp/imx8mp_bl2_el3_setup.c \
+ plat/imx/imx8m/imx8mp/gpc.c \
+ plat/imx/imx8m/imx_aipstz.c \
+ plat/imx/imx8m/imx_rdc.c \
+ plat/imx/imx8m/imx8m_caam.c \
+ plat/common/plat_psci_common.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ drivers/arm/tzc/tzc380.c \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ ${PLAT_GIC_SOURCES} \
+ ${PLAT_DRAM_SOURCES} \
+ ${XLAT_TABLES_LIB_SRCS} \
+ drivers/mmc/mmc.c \
+ drivers/io/io_block.c \
+ drivers/io/io_fip.c \
+ drivers/io/io_memmap.c \
+ drivers/io/io_storage.c \
+ drivers/imx/usdhc/imx_usdhc.c \
+ plat/imx/imx8m/imx8mp/imx8mp_bl2_mem_params_desc.c \
+ plat/imx/common/imx_io_storage.c \
+ plat/imx/imx8m/imx8m_image_load.c \
+ lib/optee/optee_utils.c
+endif
+
+# Add the build options to pack BLx images and the kernel device tree
+# in the FIP if the platform requires it.
+ifneq ($(BL2),)
+RESET_TO_BL31 := 0
+$(eval $(call TOOL_ADD_PAYLOAD,${BUILD_PLAT}/tb_fw.crt,--tb-fw-cert))
+endif
+ifneq ($(BL32_EXTRA1),)
+$(eval $(call TOOL_ADD_IMG,BL32_EXTRA1,--tos-fw-extra1))
+endif
+ifneq ($(BL32_EXTRA2),)
+$(eval $(call TOOL_ADD_IMG,BL32_EXTRA2,--tos-fw-extra2))
+endif
+ifneq ($(HW_CONFIG),)
+$(eval $(call TOOL_ADD_IMG,HW_CONFIG,--hw-config))
+endif
+
+ifeq (${NEED_BL2},yes)
+$(eval $(call add_define,NEED_BL2))
+LOAD_IMAGE_V2 := 1
+# Non-TF Boot ROM
+RESET_TO_BL2 := 1
+endif
+
+ifneq (${TRUSTED_BOARD_BOOT},0)
+
+include drivers/auth/mbedtls/mbedtls_crypto.mk
+include drivers/auth/mbedtls/mbedtls_x509.mk
+
+AUTH_SOURCES := drivers/auth/auth_mod.c \
+ drivers/auth/crypto_mod.c \
+ drivers/auth/img_parser_mod.c \
+ drivers/auth/tbbr/tbbr_cot_common.c \
+ drivers/auth/tbbr/tbbr_cot_bl2.c
+
+BL2_SOURCES += ${AUTH_SOURCES} \
+ plat/common/tbbr/plat_tbbr.c \
+ plat/imx/imx8m/imx8mp/imx8mp_trusted_boot.c \
+ plat/imx/imx8m/imx8mp/imx8mp_rotpk.S
+
+ROT_KEY = $(BUILD_PLAT)/rot_key.pem
+ROTPK_HASH = $(BUILD_PLAT)/rotpk_sha256.bin
+
+$(eval $(call add_define_val,ROTPK_HASH,'"$(ROTPK_HASH)"'))
+$(eval $(call MAKE_LIB_DIRS))
+
+$(BUILD_PLAT)/bl2/imx8mp_rotpk.o: $(ROTPK_HASH)
+
+certificates: $(ROT_KEY)
+
+$(ROT_KEY): | $(BUILD_PLAT)
+ @echo " OPENSSL $@"
+ @if [ ! -f $(ROT_KEY) ]; then \
+ ${OPENSSL_BIN_PATH}/openssl genrsa 2048 > $@ 2>/dev/null; \
+ fi
+
+$(ROTPK_HASH): $(ROT_KEY)
+ @echo " OPENSSL $@"
+ $(Q)${OPENSSL_BIN_PATH}/openssl rsa -in $< -pubout -outform DER 2>/dev/null |\
+ ${OPENSSL_BIN_PATH}/openssl dgst -sha256 -binary > $@ 2>/dev/null
+endif
+
+ENABLE_PIE := 1
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
@@ -48,6 +150,10 @@ ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+ifneq (${PRELOADED_BL33_BASE},)
+$(eval $(call add_define_val,PLAT_NS_IMAGE_OFFSET,${PRELOADED_BL33_BASE}))
+endif
+
BL32_BASE ?= 0x56000000
$(eval $(call add_define,BL32_BASE))
@@ -55,7 +161,17 @@ BL32_SIZE ?= 0x2000000
$(eval $(call add_define,BL32_SIZE))
IMX_BOOT_UART_BASE ?= 0x30890000
+ifeq (${IMX_BOOT_UART_BASE},auto)
+ override IMX_BOOT_UART_BASE := 0
+endif
$(eval $(call add_define,IMX_BOOT_UART_BASE))
-EL3_EXCEPTION_HANDLING := 1
-SDEI_SUPPORT := 1
+EL3_EXCEPTION_HANDLING := $(SDEI_SUPPORT)
+ifeq (${SDEI_SUPPORT}, 1)
+BL31_SOURCES += plat/imx/common/imx_ehf.c \
+ plat/imx/common/imx_sdei.c
+endif
+
+ifeq (${SPD},trusty)
+ BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1
+endif
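The TRUSTED_BOARD_BOOT rules above only generate rot_key.pem and rotpk_sha256.bin at build time; at runtime the hash is handed to the authentication framework through the standard TF-A TBBR hook. The sketch below is a generic illustration of that hook, assuming the usual TF-A include paths and the conventional rotpk_hash/rotpk_hash_end symbols emitted by a small assembly file that incbins the hash; it is not the contents of imx8mp_trusted_boot.c, which is not shown in this listing.

#include <plat/common/platform.h>

/* Symbols typically provided by an .S file that includes rotpk_sha256.bin. */
extern char rotpk_hash[], rotpk_hash_end[];

/* Generic TBBR hook: expose the SHA-256 ROTPK hash to the auth framework. */
int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
			unsigned int *flags)
{
	(void)cookie;

	*key_ptr = rotpk_hash;
	*key_len = (unsigned int)(rotpk_hash_end - rotpk_hash);
	*flags = ROTPK_IS_HASH;

	return 0;
}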
diff --git a/plat/imx/imx8m/imx8mq/gpc.c b/plat/imx/imx8m/imx8mq/gpc.c
index 367c9411da..ebf92f7245 100644
--- a/plat/imx/imx8m/imx8mq/gpc.c
+++ b/plat/imx/imx8m/imx8mq/gpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,13 +8,210 @@
#include <stdint.h>
#include <stdbool.h>
+#include <arch_helpers.h>
#include <common/debug.h>
+#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
-#include <platform_def.h>
+#include <lib/smccc.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
#include <services/std_svc.h>
#include <gpc.h>
+#include <platform_def.h>
+
+#define FSL_SIP_CONFIG_GPC_MASK U(0x00)
+#define FSL_SIP_CONFIG_GPC_UNMASK U(0x01)
+#define FSL_SIP_CONFIG_GPC_SET_WAKE U(0x02)
+#define FSL_SIP_CONFIG_GPC_PM_DOMAIN U(0x03)
+#define FSL_SIP_CONFIG_GPC_SET_AFF U(0x04)
+#define FSL_SIP_CONFIG_GPC_CORE_WAKE U(0x05)
+
+#define MAX_HW_IRQ_NUM U(128)
+#define MAX_IMR_NUM U(4)
+
+static uint32_t gpc_saved_imrs[16];
+static uint32_t gpc_wake_irqs[4];
+static uint32_t gpc_imr_offset[] = {
+ IMX_GPC_BASE + IMR1_CORE0_A53,
+ IMX_GPC_BASE + IMR1_CORE1_A53,
+ IMX_GPC_BASE + IMR1_CORE2_A53,
+ IMX_GPC_BASE + IMR1_CORE3_A53,
+ IMX_GPC_BASE + IMR1_CORE0_M4,
+};
+
+spinlock_t gpc_imr_lock[4];
+
+static void gpc_imr_core_spin_lock(unsigned int core_id)
+{
+ spin_lock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_imr_core_spin_unlock(unsigned int core_id)
+{
+ spin_unlock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+
+ gpc_imr_core_spin_lock(core_id);
+
+ gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
+ mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+ uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];
+
+ gpc_imr_core_spin_lock(core_id);
+
+ mmio_write_32(reg, val);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+/*
+ * On i.MX8MQ, the A53 cluster can enter LPM mode and shut down the
+ * A53 PLAT power domain only in system suspend mode, so the LPM
+ * wakeup is used only for system suspend. When the system enters
+ * suspend, any A53 core can be the last one to suspend the system,
+ * but the LPM wakeup can only use core 0's IMR to wake the A53
+ * cluster from LPM, so save the IMRs before suspend and restore
+ * them after resume.
+ */
+void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
+{
+ unsigned int imr, core;
+
+ if (pdn) {
+ for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
+ for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
+ gpc_save_imr_lpm(core, imr);
+ }
+ }
+ } else {
+ for (imr = 0U; imr < MAX_IMR_NUM; imr++) {
+ for (core = 0U; core < PLATFORM_CORE_COUNT; core++) {
+ gpc_restore_imr_lpm(core, imr);
+ }
+ }
+ }
+}
+
+static void imx_gpc_hwirq_mask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= 1 << hwirq % 32;
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_hwirq_unmask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_set_wake(uint32_t hwirq, bool on)
+{
+ uint32_t mask, idx;
+
+ if (hwirq >= MAX_HW_IRQ_NUM) {
+ return;
+ }
+
+ mask = 1 << hwirq % 32;
+ idx = hwirq / 32;
+ gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
+ gpc_wake_irqs[idx] & ~mask;
+}
+
+static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
+{
+ gpc_imr_core_spin_lock(core_id);
+ if (mask) {
+ mmio_setbits_32(gpc_imr_offset[core_id], 1);
+ } else {
+ mmio_clrbits_32(gpc_imr_offset[core_id], 1);
+ }
+
+ dsb();
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+void imx_gpc_core_wake(uint32_t cpumask)
+{
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpumask & (1 << i)) {
+ imx_gpc_mask_irq0(i, false);
+ }
+ }
+}
+
+void imx_gpc_set_a53_core_awake(uint32_t core_id)
+{
+ imx_gpc_mask_irq0(core_id, true);
+}
+
+static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ if (hwirq >= MAX_HW_IRQ_NUM || cpu_idx >= 4) {
+ return;
+ }
+
+ /*
+	 * Use the mask/unmask bit as the affinity function: unmask the
+	 * IMR bit to enable IRQ wakeup for this core.
+ */
+ gpc_imr_core_spin_lock(cpu_idx);
+ reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(cpu_idx);
+
+ /* clear affinity of other core */
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpu_idx != i) {
+ gpc_imr_core_spin_lock(i);
+ reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= (1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(i);
+ }
+ }
+}
/* use wfi power down the core */
void imx_set_cpu_pwr_off(unsigned int core_id)
@@ -64,7 +261,7 @@ void imx_pup_pdn_slot_config(int last_core, bool pdn)
mmio_setbits_32(IMX_GPC_BASE + SLTx_CFG(2), SLT_COREx_PUP(last_core));
/* ACK setting: PLAT ACK for PDN, CORE ACK for PUP */
mmio_clrsetbits_32(IMX_GPC_BASE + PGC_ACK_SEL_A53, 0xFFFFFFFF,
- A53_PLAT_PDN_ACK | A53_PLAT_PUP_ACK);
+ A53_PLAT_PDN_ACK | SLT_COREx_PUP_ACK(last_core));
} else {
mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(0), 0xFFFFFFFF);
mmio_clrbits_32(IMX_GPC_BASE + SLTx_CFG(1), 0xFFFFFFFF);
@@ -123,26 +320,89 @@ void imx_set_cluster_powerdown(unsigned int last_core, uint8_t power_state)
}
}
+#define MAX_PLL_NUM U(12)
+
+static const struct pll_override imx8mq_pll[MAX_PLL_NUM] = {
+ {.reg = 0x0, .override_mask = 0x140000, },
+ {.reg = 0x8, .override_mask = 0x140000, },
+ {.reg = 0x10, .override_mask = 0x140000, },
+ {.reg = 0x18, .override_mask = 0x140000, },
+ {.reg = 0x20, .override_mask = 0x140000, },
+ {.reg = 0x28, .override_mask = 0x140000, },
+ {.reg = 0x30, .override_mask = 0x1555540, },
+ {.reg = 0x3c, .override_mask = 0x1555540, },
+ {.reg = 0x48, .override_mask = 0x140, },
+ {.reg = 0x54, .override_mask = 0x140, },
+ {.reg = 0x60, .override_mask = 0x140, },
+ {.reg = 0x70, .override_mask = 0xa, },
+};
+
+void imx_anamix_override(bool enter)
+{
+ unsigned int i;
+
+ /* enable the pll override bit before entering DSM mode */
+ for (i = 0; i < MAX_PLL_NUM; i++) {
+ if (enter) {
+ mmio_setbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
+ imx8mq_pll[i].override_mask);
+ } else {
+ mmio_clrbits_32(IMX_ANAMIX_BASE + imx8mq_pll[i].reg,
+ imx8mq_pll[i].override_mask);
+ }
+ }
+}
+
+int imx_gpc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3)
+{
+ switch (x1) {
+ case FSL_SIP_CONFIG_GPC_CORE_WAKE:
+ imx_gpc_core_wake(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_WAKE:
+ imx_gpc_set_wake(x2, x3);
+ break;
+ case FSL_SIP_CONFIG_GPC_MASK:
+ imx_gpc_hwirq_mask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_UNMASK:
+ imx_gpc_hwirq_unmask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_AFF:
+ imx_gpc_set_affinity(x2, x3);
+ break;
+ default:
+ return SMC_UNK;
+ }
+
+ return 0;
+}
+
void imx_gpc_init(void)
{
uint32_t val;
- int i;
+ unsigned int i, j;
+
/* mask all the interrupt by default */
- for (i = 0; i < 4; i++) {
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
+ for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ for (j = 0U; j < ARRAY_SIZE(gpc_imr_offset); j++) {
+ mmio_write_32(gpc_imr_offset[j] + i * 4, ~0x0);
+ }
}
+
/* Due to the hardware design requirement, need to make
* sure GPR interrupt(#32) is unmasked during RUN mode to
* avoid entering DSM mode by mistake.
*/
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53, 0xFFFFFFFE);
- mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53, 0xFFFFFFFE);
+ for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ mmio_write_32(gpc_imr_offset[i], ~0x1);
+ }
+
+ /* leave the IOMUX_GPC bit 12 on for core wakeup */
+ mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);
/* use external IRQs to wakeup C0~C3 from LPM */
val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
@@ -157,7 +417,7 @@ void imx_gpc_init(void)
/* set all mix/PU in A53 domain */
mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xfffd);
- /* set SCU timming */
+ /* set SCU timing */
mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
(0x59 << 10) | 0x5B | (0x2 << 20));
@@ -176,6 +436,13 @@ void imx_gpc_init(void)
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
- /* enable all the power domain by default */
- mmio_write_32(IMX_GPC_BASE + PU_PGC_UP_TRG, 0x3fcf);
+ /*
+	 * For USB OTG, the limitations are:
+	 * 1. before the system clock is configured, the IPG clock runs at
+	 *    12.5MHz and the delay must be longer than 82us.
+	 * 2. after the system clock is configured, the IPG clock runs at
+	 *    66.5MHz and the delay must be longer than 15.3us.
+	 * Use 100us to make sure the USB OTG SRC bits are cleared safely.
+ */
+ udelay(100);
}
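The wake and affinity helpers added above all rely on the same hwirq bookkeeping: 32 interrupts per IMRn register, so hwirq / 32 selects the register and hwirq % 32 selects the bit within it. A tiny standalone example of that mapping (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 67;	/* example interrupt number */

	/* 67 / 32 == 2 -> third IMR register, 67 % 32 == 3 -> bit 3 */
	printf("hwirq %u -> IMR index %u, bit %u\n",
	       hwirq, hwirq / 32, hwirq % 32);

	return 0;
}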
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
index 05b59705f0..7065a65861 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_bl31_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,20 +18,39 @@
#include <drivers/generic_delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>
-#include <lib/xlat_tables/xlat_tables.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
+#include <dram.h>
#include <gpc.h>
#include <imx_aipstz.h>
#include <imx_uart.h>
#include <imx8m_caam.h>
#include <plat_imx8.h>
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
+/*
+ * Avoid the pointer dereference of the canonical mmio_read_8() implementation.
+ * This prevents the compiler from mis-interpreting the MMIO access as an
+ * illegal memory access to a very low address (the IMX ROM is mapped at 0).
+ */
+static uint8_t mmio_read_8_ldrb(uintptr_t address)
+{
+ uint8_t reg;
+
+ __asm__ volatile ("ldrb %w0, [%1]" : "=r" (reg) : "r" (address));
+
+ return reg;
+}
+
static const mmap_region_t imx_mmap[] = {
MAP_REGION_FLAT(GPV_BASE, GPV_SIZE, MT_DEVICE | MT_RW), /* GPV map */
MAP_REGION_FLAT(IMX_ROM_BASE, IMX_ROM_SIZE, MT_MEMORY | MT_RO), /* ROM map */
MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE, MT_DEVICE | MT_RW), /* AIPS map */
MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE, MT_DEVICE | MT_RW), /* GIC map */
+ MAP_REGION_FLAT(IMX_DDRPHY_BASE, IMX_DDR_IPS_SIZE, MT_DEVICE | MT_RW), /* DDRMIX map */
+ MAP_REGION_FLAT(IMX_DRAM_BASE, IMX_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
{0},
};
@@ -65,11 +84,11 @@ static void imx8mq_soc_info_init(void)
uint32_t ocotp_val;
imx_soc_revision = mmio_read_32(IMX_ANAMIX_BASE + ANAMIX_DIGPROG);
- rom_version = mmio_read_8(IMX_ROM_BASE + ROM_SOC_INFO_A0);
+ rom_version = mmio_read_8_ldrb(IMX_ROM_BASE + ROM_SOC_INFO_A0);
if (rom_version == 0x10)
return;
- rom_version = mmio_read_8(IMX_ROM_BASE + ROM_SOC_INFO_B0);
+ rom_version = mmio_read_8_ldrb(IMX_ROM_BASE + ROM_SOC_INFO_B0);
if (rom_version == 0x20) {
imx_soc_revision &= ~0xff;
imx_soc_revision |= rom_version;
@@ -80,7 +99,11 @@ static void imx8mq_soc_info_init(void)
ocotp_val = mmio_read_32(IMX_OCOTP_BASE + OCOTP_SOC_INFO_B1);
if (ocotp_val == 0xff0055aa) {
imx_soc_revision &= ~0xff;
- imx_soc_revision |= 0x21;
+ if (rom_version == 0x22) {
+ imx_soc_revision |= 0x22;
+ } else {
+ imx_soc_revision |= 0x21;
+ }
return;
}
}
@@ -122,6 +145,7 @@ static void bl31_tz380_setup(void)
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
u_register_t arg2, u_register_t arg3)
{
+ static console_t console;
int i;
/* enable CSU NS access permission */
for (i = 0; i < 64; i++) {
@@ -130,14 +154,13 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
imx_aipstz_init(aipstz);
- imx8m_caam_init();
-
-#if DEBUG_CONSOLE
- static console_t console;
-
console_imx_uart_register(IMX_BOOT_UART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
IMX_CONSOLE_BAUDRATE, &console);
-#endif
+ /* This console is only used for boot stage */
+ console_set_scope(&console, CONSOLE_FLAG_BOOT);
+
+ imx8m_caam_init();
+
/*
* tell BL3-1 where the non-secure software image is located
* and the entry state information.
@@ -146,7 +169,7 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
-#ifdef SPD_opteed
+#if defined(SPD_opteed) || defined(SPD_trusty)
/* Populate entry point information for BL32 */
SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
@@ -156,6 +179,16 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
/* Pass TEE base and size to bl33 */
bl33_image_ep_info.args.arg1 = BL32_BASE;
bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+#ifdef SPD_trusty
+ bl32_image_ep_info.args.arg0 = BL32_SIZE;
+ bl32_image_ep_info.args.arg1 = BL32_BASE;
+#else
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
#endif
bl31_tz380_setup();
@@ -163,20 +196,22 @@ void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
void bl31_plat_arch_setup(void)
{
- mmap_add_region(BL31_BASE, BL31_BASE, (BL31_LIMIT - BL31_BASE),
- MT_MEMORY | MT_RW | MT_SECURE);
- mmap_add_region(BL_CODE_BASE, BL_CODE_BASE, (BL_CODE_END - BL_CODE_BASE),
- MT_MEMORY | MT_RO | MT_SECURE);
-
- mmap_add(imx_mmap);
-
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL31_START, BL31_SIZE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_MEMORY | MT_RO | MT_SECURE),
#if USE_COHERENT_MEM
- mmap_add_region(BL_COHERENT_RAM_BASE, BL_COHERENT_RAM_BASE,
- BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
- MT_DEVICE | MT_RW | MT_SECURE);
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
#endif
- /* setup xlat table */
- init_xlat_tables();
+ /* Map TEE memory */
+ MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW),
+ {0},
+ };
+
+ setup_page_tables(bl_regions, imx_mmap);
/* enable the MMU */
enable_mmu_el3(0);
}
@@ -194,6 +229,8 @@ void bl31_platform_setup(void)
/* gpc init */
imx_gpc_init();
+
+ dram_info_init(SAVED_DRAM_TIMING_BASE);
}
entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type)
@@ -211,7 +248,11 @@ unsigned int plat_get_syscnt_freq2(void)
return COUNTER_FREQUENCY;
}
-void bl31_plat_runtime_setup(void)
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
{
- return;
+ args->arg0 = BL32_SIZE;
+ args->arg1 = BL32_BASE;
+ args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
}
+#endif
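For context on the mmio_read_8_ldrb() workaround introduced above: the generic accessor it bypasses is essentially a plain volatile dereference, as sketched below (simplified from TF-A's include/lib/mmio.h). Because the ROM is mapped at physical address 0, the compiler may treat such a near-zero dereference as an invalid access and miscompile or trap on it, which the hand-written ldrb avoids.

#include <stdint.h>

/* Simplified form of the generic MMIO byte accessor. */
static inline uint8_t mmio_read_8(uintptr_t addr)
{
	return *(volatile uint8_t *)addr;
}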
diff --git a/plat/imx/imx8m/imx8mq/imx8mq_psci.c b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
index 662017d6fe..3375ce71bc 100644
--- a/plat/imx/imx8m/imx8mq/imx8mq_psci.c
+++ b/plat/imx/imx8m/imx8mq/imx8mq_psci.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,9 +9,11 @@
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
+#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
+#include <dram.h>
#include <gpc.h>
#include <imx8m_psci.h>
#include <plat_imx8.h>
@@ -39,9 +41,24 @@ int imx_validate_power_state(unsigned int power_state,
return PSCI_E_SUCCESS;
}
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ plat_gic_cpuif_disable();
+ imx_set_cpu_pwr_off(core_id);
+
+ /*
+ * TODO: Find out why this is still
+ * needed in order not to break suspend
+ */
+ udelay(50);
+}
+
void imx_domain_suspend(const psci_power_state_t *target_state)
{
- uint64_t base_addr = BL31_BASE;
+ uint64_t base_addr = BL31_START;
uint64_t mpidr = read_mpidr_el1();
unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
@@ -57,12 +74,14 @@ void imx_domain_suspend(const psci_power_state_t *target_state)
}
if (is_local_state_off(CLUSTER_PWR_STATE(target_state)))
- imx_set_cluster_powerdown(core_id, true);
+ imx_set_cluster_powerdown(core_id, CLUSTER_PWR_STATE(target_state));
else
imx_set_cluster_standby(true);
if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) {
imx_set_sys_lpm(core_id, true);
+ dram_enter_retention();
+ imx_anamix_override(true);
}
}
@@ -73,18 +92,22 @@ void imx_domain_suspend_finish(const psci_power_state_t *target_state)
/* check the system level status */
if (is_local_state_retn(SYSTEM_PWR_STATE(target_state))) {
+ imx_anamix_override(false);
+ dram_exit_retention();
imx_set_sys_lpm(core_id, false);
imx_clear_rbc_count();
}
/* check the cluster level power status */
if (is_local_state_off(CLUSTER_PWR_STATE(target_state)))
- imx_set_cluster_powerdown(core_id, false);
+ imx_set_cluster_powerdown(core_id, PSCI_LOCAL_STATE_RUN);
else
imx_set_cluster_standby(false);
/* check the core level power status */
if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ /* mark this core as awake by masking IRQ0 */
+ imx_gpc_set_a53_core_awake(core_id);
/* clear the core lpm setting */
imx_set_cpu_lpm(core_id, false);
/* enable the gic cpu interface */
diff --git a/plat/imx/imx8m/imx8mq/include/imx_sec_def.h b/plat/imx/imx8m/imx8mq/include/imx_sec_def.h
new file mode 100644
index 0000000000..0f771415f9
--- /dev/null
+++ b/plat/imx/imx8m/imx8mq/include/imx_sec_def.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_SEC_DEF_H
+#define IMX_SEC_DEF_H
+
+/* RDC MDA index */
+enum rdc_mda_idx {
+ RDC_MDA_A53 = 0,
+ RDC_MDA_M4 = 1,
+ RDC_MDA_PCIE_CTRL1 = 2,
+ RDC_MDA_PCIE_CTRL2 = 3,
+ RDC_MDA_VPU_DEC = 4,
+ RDC_MDA_LCDIF = 5,
+ RDC_MDA_CSI1 = 6,
+ RDC_MDA_CSI2 = 7,
+ RDC_MDA_Coresight = 8,
+ RDC_MDA_DAP = 9,
+ RDC_MDA_CAAM = 10,
+ RDC_MDA_SDMAp = 11,
+ RDC_MDA_SDMAb = 12,
+ RDC_MDA_APBHDMA = 13,
+ RDC_MDA_RAWNAND = 14,
+ RDC_MDA_uSDHC1 = 15,
+ RDC_MDA_uSDHC2 = 16,
+ RDC_MDA_DCSS = 17,
+ RDC_MDA_GPU = 18,
+ RDC_MDA_USB1 = 19,
+ RDC_MDA_USB2 = 20,
+ RDC_MDA_TESTPORT = 21,
+ RDC_MDA_ENET1_TX = 22,
+ RDC_MDA_ENET1_RX = 23,
+ RDC_MDA_SDMA2 = 24,
+ RDC_MDA_SDMA1 = 26,
+};
+
+/* RDC Peripherals index */
+enum rdc_pdap_idx {
+ RDC_PDAP_GPIO1 = 0,
+ RDC_PDAP_GPIO2 = 1,
+ RDC_PDAP_GPIO3 = 2,
+ RDC_PDAP_GPIO4 = 3,
+ RDC_PDAP_GPIO5 = 4,
+ RDC_PDAP_ANA_TSENSOR = 6,
+ RDC_PDAP_ANA_OSC = 7,
+ RDC_PDAP_WDOG1 = 8,
+ RDC_PDAP_WDOG2 = 9,
+ RDC_PDAP_WDOG3 = 10,
+ RDC_PDAP_SDMA2 = 12,
+ RDC_PDAP_GPT1 = 13,
+ RDC_PDAP_GPT2 = 14,
+ RDC_PDAP_GPT3 = 15,
+ RDC_PDAP_ROMCP = 17,
+ RDC_PDAP_LCDIF = 18,
+ RDC_PDAP_IOMUXC = 19,
+ RDC_PDAP_IOMUXC_GPR = 20,
+ RDC_PDAP_OCOTP_CTRL = 21,
+ RDC_PDAP_ANATOP_PLL = 22,
+ RDC_PDAP_SNVS_HP = 23,
+ RDC_PDAP_CCM = 24,
+ RDC_PDAP_SRC = 25,
+ RDC_PDAP_GPC = 26,
+ RDC_PDAP_SEMAPHORE1 = 27,
+ RDC_PDAP_SEMAPHORE2 = 28,
+ RDC_PDAP_RDC = 29,
+ RDC_PDAP_CSU = 30,
+ RDC_PDAP_MST0 = 32,
+ RDC_PDAP_MST1 = 33,
+ RDC_PDAP_MST2 = 34,
+ RDC_PDAP_MST3 = 35,
+ RDC_PDAP_HDMI_SEC = 36,
+ RDC_PDAP_PWM1 = 38,
+ RDC_PDAP_PWM2 = 39,
+ RDC_PDAP_PWM3 = 40,
+ RDC_PDAP_PWM4 = 41,
+ RDC_PDAP_SysCounter_RD = 42,
+ RDC_PDAP_SysCounter_CMP = 43,
+ RDC_PDAP_SysCounter_CTRL = 44,
+ RDC_PDAP_HDMI_CTRL = 45,
+ RDC_PDAP_GPT6 = 46,
+ RDC_PDAP_GPT5 = 47,
+ RDC_PDAP_GPT4 = 48,
+ RDC_PDAP_TZASC = 56,
+ RDC_PDAP_MTR = 59,
+ RDC_PDAP_PERFMON1 = 60,
+ RDC_PDAP_PERFMON2 = 61,
+ RDC_PDAP_PLATFORM_CTRL = 62,
+ RDC_PDAP_QoSC = 63,
+ RDC_PDAP_MIPI_PHY = 64,
+ RDC_PDAP_MIPI_DSI = 65,
+ RDC_PDAP_I2C1 = 66,
+ RDC_PDAP_I2C2 = 67,
+ RDC_PDAP_I2C3 = 68,
+ RDC_PDAP_I2C4 = 69,
+ RDC_PDAP_UART4 = 70,
+ RDC_PDAP_MIPI_CSI1 = 71,
+ RDC_PDAP_MIPI_CSI_PHY1 = 72,
+ RDC_PDAP_CSI1 = 73,
+ RDC_PDAP_MU_A = 74,
+ RDC_PDAP_MU_B = 75,
+ RDC_PDAP_SEMAPHORE_HS = 76,
+ RDC_PDAP_SAI1 = 78,
+ RDC_PDAP_SAI6 = 80,
+ RDC_PDAP_SAI5 = 81,
+ RDC_PDAP_SAI4 = 82,
+ RDC_PDAP_USDHC1 = 84,
+ RDC_PDAP_USDHC2 = 85,
+ RDC_PDAP_MIPI_CSI2 = 86,
+ RDC_PDAP_MIPI_CSI_PHY2 = 87,
+ RDC_PDAP_CSI2 = 88,
+ RDC_PDAP_QSPI = 91,
+ RDC_PDAP_SDMA1 = 93,
+ RDC_PDAP_ENET1 = 94,
+ RDC_PDAP_SPDIF1 = 97,
+ RDC_PDAP_ECSPI1 = 98,
+ RDC_PDAP_ECSPI2 = 99,
+ RDC_PDAP_ECSPI3 = 100,
+ RDC_PDAP_UART1 = 102,
+ RDC_PDAP_UART3 = 104,
+ RDC_PDAP_UART2 = 105,
+ RDC_PDAP_SPDIF2 = 106,
+ RDC_PDAP_SAI2 = 107,
+ RDC_PDAP_SAI3 = 108,
+ RDC_PDAP_SPBA1 = 111,
+ RDC_PDAP_CAAM = 114,
+ RDC_PDAP_DDRC_SEC = 115,
+ RDC_PDAP_GIC_EXSC = 116,
+ RDC_PDAP_USB_EXSC = 117,
+ RDC_PDAP_OCRAM_TZ = 118,
+ RDC_PDAP_OCRAM_S_TZ = 119,
+ RDC_PDAP_VPU_SEC = 120,
+ RDC_PDAP_DAP_EXSC = 121,
+ RDC_PDAP_ROMCP_SEC = 122,
+ RDC_PDAP_APBHDMA_SEC = 123,
+ RDC_PDAP_M4_SEC = 124,
+ RDC_PDAP_QSPI_SEC = 125,
+ RDC_PDAP_GPU_EXSC = 126,
+ RDC_PDAP_PCIE = 127,
+};
+
+enum csu_csl_idx {
+ CSU_CSL_GPIO1 = 0,
+ CSU_CSL_GPIO2 = 1,
+ CSU_CSL_GPIO3 = 2,
+ CSU_CSL_GPIO4 = 3,
+ CSU_CSL_GPIO5 = 4,
+ CSU_CSL_ANA_TSENSOR = 6,
+ CSU_CSL_ANA_OSC = 7,
+ CSU_CSL_WDOG1 = 8,
+ CSU_CSL_WDOG2 = 9,
+ CSU_CSL_WDOG3 = 10,
+ CSU_CSL_SDMA2 = 12,
+ CSU_CSL_GPT1 = 13,
+ CSU_CSL_GPT2 = 14,
+ CSU_CSL_GPT3 = 15,
+ CSU_CSL_ROMCP = 17,
+ CSU_CSL_LCDIF = 18,
+ CSU_CSL_IOMUXC = 19,
+ CSU_CSL_IOMUXC_GPR = 20,
+ CSU_CSL_OCOTP_CTRL = 21,
+ CSU_CSL_ANATOP_PLL = 22,
+ CSU_CSL_SNVS_HP = 23,
+ CSU_CSL_CCM = 24,
+ CSU_CSL_SRC = 25,
+ CSU_CSL_GPC = 26,
+ CSU_CSL_SEMAPHORE1 = 27,
+ CSU_CSL_SEMAPHORE2 = 28,
+ CSU_CSL_RDC = 29,
+ CSU_CSL_CSU = 30,
+ CSU_CSL_MST0 = 32,
+ CSU_CSL_MST1 = 33,
+ CSU_CSL_MST2 = 34,
+ CSU_CSL_MST3 = 35,
+ CSU_CSL_HDMI_SEC = 36,
+ CSU_CSL_PWM1 = 38,
+ CSU_CSL_PWM2 = 39,
+ CSU_CSL_PWM3 = 40,
+ CSU_CSL_PWM4 = 41,
+ CSU_CSL_SysCounter_RD = 42,
+ CSU_CSL_SysCounter_CMP = 43,
+ CSU_CSL_SysCounter_CTRL = 44,
+ CSU_CSL_HDMI_CTRL = 45,
+ CSU_CSL_GPT6 = 46,
+ CSU_CSL_GPT5 = 47,
+ CSU_CSL_GPT4 = 48,
+ CSU_CSL_TZASC = 56,
+ CSU_CSL_MTR = 59,
+ CSU_CSL_PERFMON1 = 60,
+ CSU_CSL_PERFMON2 = 61,
+ CSU_CSL_PLATFORM_CTRL = 62,
+ CSU_CSL_QoSC = 63,
+ CSU_CSL_MIPI_PHY = 64,
+ CSU_CSL_MIPI_DSI = 65,
+ CSU_CSL_I2C1 = 66,
+ CSU_CSL_I2C2 = 67,
+ CSU_CSL_I2C3 = 68,
+ CSU_CSL_I2C4 = 69,
+ CSU_CSL_UART4 = 70,
+ CSU_CSL_MIPI_CSI1 = 71,
+ CSU_CSL_MIPI_CSI_PHY1 = 72,
+ CSU_CSL_CSI1 = 73,
+ CSU_CSL_MU_A = 74,
+ CSU_CSL_MU_B = 75,
+ CSU_CSL_SEMAPHORE_HS = 76,
+ CSU_CSL_SAI1 = 78,
+ CSU_CSL_SAI6 = 80,
+ CSU_CSL_SAI5 = 81,
+ CSU_CSL_SAI4 = 82,
+ CSU_CSL_USDHC1 = 84,
+ CSU_CSL_USDHC2 = 85,
+ CSU_CSL_MIPI_CSI2 = 86,
+ CSU_CSL_MIPI_CSI_PHY2 = 87,
+ CSU_CSL_CSI2 = 88,
+ CSU_CSL_QSPI = 91,
+ CSU_CSL_SDMA1 = 93,
+ CSU_CSL_ENET1 = 94,
+ CSU_CSL_SPDIF1 = 97,
+ CSU_CSL_ECSPI1 = 98,
+ CSU_CSL_ECSPI2 = 99,
+ CSU_CSL_ECSPI3 = 100,
+ CSU_CSL_UART1 = 102,
+ CSU_CSL_UART3 = 104,
+ CSU_CSL_UART2 = 105,
+ CSU_CSL_SPDIF2 = 106,
+ CSU_CSL_SAI2 = 107,
+ CSU_CSL_SAI3 = 108,
+ CSU_CSL_SPBA1 = 111,
+ CSU_CSL_MOD_EN3 = 112,
+ CSU_CSL_MOD_EN0 = 113,
+ CSU_CSL_CAAM = 114,
+ CSU_CSL_DDRC_SEC = 115,
+ CSU_CSL_GIC_EXSC = 116,
+ CSU_CSL_USB_EXSC = 117,
+ CSU_CSL_OCRAM_TZ = 118,
+ CSU_CSL_OCRAM_S_TZ = 119,
+ CSU_CSL_VPU_SEC = 120,
+ CSU_CSL_DAP_EXSC = 121,
+ CSU_CSL_ROMCP_SEC = 122,
+ CSU_CSL_APBHDMA_SEC = 123,
+ CSU_CSL_M4_SEC = 124,
+ CSU_CSL_QSPI_SEC = 125,
+ CSU_CSL_GPU_EXSC = 126,
+ CSU_CSL_PCIE = 127,
+};
+
+#endif /* IMX_SEC_DEF_H */
diff --git a/plat/imx/imx8m/imx8mq/include/platform_def.h b/plat/imx/imx8m/imx8mq/include/platform_def.h
index 6d6a8650e8..b04f3915ec 100644
--- a/plat/imx/imx8m/imx8mq/include/platform_def.h
+++ b/plat/imx/imx8m/imx8mq/include/platform_def.h
@@ -1,9 +1,12 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <lib/utils_def.h>
+#include <plat/common/common_def.h>
+
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
@@ -31,10 +34,14 @@
#define PLAT_STOP_OFF_STATE U(3)
#define BL31_BASE U(0x910000)
-#define BL31_LIMIT U(0x920000)
+#define BL31_SIZE SZ_64K
+#define BL31_LIMIT (BL31_BASE + BL31_SIZE)
/* non-secure uboot base */
+#ifndef PLAT_NS_IMAGE_OFFSET
#define PLAT_NS_IMAGE_OFFSET U(0x40200000)
+#endif
+#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000)
/* GICv3 base address */
#define PLAT_GICD_BASE U(0x38800000)
@@ -43,12 +50,16 @@
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
+#ifdef SPD_trusty
+#define MAX_XLAT_TABLES 5
+#define MAX_MMAP_REGIONS 15
+#else
#define MAX_XLAT_TABLES 4
#define MAX_MMAP_REGIONS 14
+#endif
#define HAB_RVT_BASE U(0x00000880) /* HAB_RVT for i.MX8MQ */
-#define IMX_BOOT_UART_BASE U(0x30860000)
#define IMX_BOOT_UART_CLK_IN_HZ 25000000 /* Select 25Mhz oscillator */
#define PLAT_CRASH_UART_BASE IMX_BOOT_UART_BASE
#define PLAT_CRASH_UART_CLK_IN_HZ 25000000
@@ -74,6 +85,9 @@
#define IMX_DDRC_BASE U(0x3d400000)
#define IMX_DDRPHY_BASE U(0x3c000000)
#define IMX_DDR_IPS_BASE U(0x3d000000)
+#define IMX_DDR_IPS_SIZE U(0x1800000)
+#define IMX_DRAM_BASE U(0x40000000)
+#define IMX_DRAM_SIZE U(0xc0000000)
#define IMX_ROM_BASE U(0x00000000)
#define IMX_ROM_SIZE U(0x20000)
@@ -111,6 +125,12 @@
#define SNVS_LPCR_DP_EN BIT(5)
#define SNVS_LPCR_TOP BIT(6)
+#define SAVED_DRAM_TIMING_BASE U(0x40000000)
+
+#define HW_DRAM_PLL_CFG0 (IMX_ANAMIX_BASE + 0x60)
+#define HW_DRAM_PLL_CFG1 (IMX_ANAMIX_BASE + 0x64)
+#define HW_DRAM_PLL_CFG2 (IMX_ANAMIX_BASE + 0x68)
+#define DRAM_PLL_CTRL HW_DRAM_PLL_CFG0
#define IOMUXC_GPR10 U(0x28)
#define GPR_TZASC_EN BIT(0)
@@ -120,7 +140,6 @@
#define OCRAM_S_SIZE U(0x8000)
#define OCRAM_S_LIMIT (OCRAM_S_BASE + OCRAM_S_SIZE)
-#define COUNTER_FREQUENCY 8000000 /* 8MHz */
+#define COUNTER_FREQUENCY 8333333 /* 25MHz / 3 */
-#define DEBUG_CONSOLE 0
#define IMX_WDOG_B_RESET
diff --git a/plat/imx/imx8m/imx8mq/platform.mk b/plat/imx/imx8m/imx8mq/platform.mk
index 546101043a..2356cbd033 100644
--- a/plat/imx/imx8m/imx8mq/platform.mk
+++ b/plat/imx/imx8m/imx8mq/platform.mk
@@ -1,9 +1,12 @@
#
-# Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
PLAT_INCLUDES := -Iplat/imx/common/include \
-Iplat/imx/imx8m/include \
-Iplat/imx/imx8m/imx8mq/include
@@ -11,6 +14,12 @@ PLAT_INCLUDES := -Iplat/imx/common/include \
# Include GICv3 driver files
include drivers/arm/gic/v3/gicv3.mk
+IMX_DRAM_SOURCES := plat/imx/imx8m/ddr/dram.c \
+ plat/imx/imx8m/ddr/clock.c \
+ plat/imx/imx8m/ddr/dram_retention.c \
+ plat/imx/imx8m/ddr/ddr4_dvfs.c \
+ plat/imx/imx8m/ddr/lpddr4_dvfs.c
+
IMX_GIC_SOURCES := ${GICV3_SOURCES} \
plat/common/plat_gicv3.c \
plat/common/plat_psci_common.c \
@@ -28,24 +37,37 @@ BL31_SOURCES += plat/imx/common/imx8_helpers.S \
plat/imx/common/imx_sip_handler.c \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_uart_console.S \
- lib/xlat_tables/aarch64/xlat_tables.c \
- lib/xlat_tables/xlat_tables_common.c \
lib/cpus/aarch64/cortex_a53.S \
drivers/arm/tzc/tzc380.c \
drivers/delay_timer/delay_timer.c \
drivers/delay_timer/generic_delay_timer.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ ${IMX_DRAM_SOURCES} \
${IMX_GIC_SOURCES}
+ENABLE_PIE := 1
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
ERRATA_A53_855873 := 1
+ifneq (${PRELOADED_BL33_BASE},)
+$(eval $(call add_define_val,PLAT_NS_IMAGE_OFFSET,${PRELOADED_BL33_BASE}))
+endif
+
BL32_BASE ?= 0xfe000000
$(eval $(call add_define,BL32_BASE))
BL32_SIZE ?= 0x2000000
$(eval $(call add_define,BL32_SIZE))
+
+IMX_BOOT_UART_BASE ?= 0x30860000
+$(eval $(call add_define,IMX_BOOT_UART_BASE))
+
+ifeq (${SPD},trusty)
+ BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1
+endif
diff --git a/plat/imx/imx8m/imx_hab.c b/plat/imx/imx8m/imx_hab.c
new file mode 100644
index 0000000000..222046fb9a
--- /dev/null
+++ b/plat/imx/imx8m/imx_hab.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017-2020 NXP
+ * Copyright 2022 Leica Geosystems AG
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/runtime_svc.h>
+#include <imx_sip_svc.h>
+
+#define HAB_CID_ATF U(2) /* TF-A Caller ID */
+
+/* HAB Status definitions */
+enum hab_status {
+ HAB_STS_ANY = 0x00, /* Match any status in report_event() */
+ HAB_FAILURE = 0x33, /* Operation failed */
+ HAB_WARNING = 0x69, /* Operation completed with warning */
+ HAB_SUCCESS = 0xf0 /* Operation completed successfully */
+};
+
+/* HAB Configuration definitions */
+enum hab_config {
+ HAB_CFG_RETURN = 0x33, /* Field Return IC */
+ HAB_CFG_OPEN = 0xf0, /* Non-secure IC */
+ HAB_CFG_CLOSED = 0xcc /* Secure IC */
+};
+
+/* HAB State definitions */
+enum hab_state {
+ HAB_STATE_INITIAL = 0x33, /* Initializing state (transitory) */
+ HAB_STATE_CHECK = 0x55, /* Check state (non-secure) */
+ HAB_STATE_NONSECURE = 0x66, /* Non-secure state */
+ HAB_STATE_TRUSTED = 0x99, /* Trusted state */
+ HAB_STATE_SECURE = 0xaa, /* Secure state */
+ HAB_STATE_FAIL_SOFT = 0xcc, /* Soft fail state */
+ HAB_STATE_FAIL_HARD = 0xff, /* Hard fail state (terminal) */
+ HAB_STATE_NONE = 0xf0 /* No security state machine */
+};
+
+/* HAB Verification Target definitions */
+enum hab_target {
+ HAB_TGT_MEMORY = 0x0f, /* Check memory allowed list */
+ HAB_TGT_PERIPHERAL = 0xf0, /* Check peripheral allowed list */
+ HAB_TGT_ANY = 0x55 /* Check memory & peripheral allowed list */
+};
+
+/* Authenticate Image Loader Callback prototype */
+typedef enum hab_status hab_loader_callback_f_t(void **, size_t *, const void *);
+
+/*
+ * HAB ROM Vector Table (RVT) structure.
+ * This table provides function pointers into the HAB library in ROM for
+ * use by post-ROM boot sequence components.
+ * Functions are ordered in the structure below based on their offsets in
+ * the ROM image and must not be reordered.
+ * Details on the API allocation offsets and function descriptions can be
+ * found in the following documents from NXP:
+ * - High Assurance Boot Version 4 Application Programming Interface
+ * Reference Manual (available in CST package)
+ * - HABv4 RVT Guidelines and Recommendations (AN12263)
+ */
+struct hab_rvt_api {
+ uint64_t hdr;
+ enum hab_status (*entry)(void);
+ enum hab_status (*exit)(void);
+ enum hab_status (*check_target)(enum hab_target type, const void *start, size_t bytes);
+ void* (*authenticate_image)(uint8_t cid, long ivt_offset, void **start,
+ size_t *bytes, hab_loader_callback_f_t loader);
+ enum hab_status (*run_dcd)(const uint8_t *dcd);
+ enum hab_status (*run_csf)(const uint8_t *csf, uint8_t cid, uint32_t srkmask);
+ enum hab_status (*assert)(long type, const void *data, uint32_t count);
+ enum hab_status (*report_event)(enum hab_status status, uint32_t index,
+ uint8_t *event, size_t *bytes);
+ enum hab_status (*report_status)(enum hab_config *config, enum hab_state *state);
+ void (*failsafe)(void);
+ void* (*authenticate_image_no_dcd)(uint8_t cid, long ivt_offset, void **start,
+ size_t *bytes, hab_loader_callback_f_t loader);
+ uint32_t (*get_version)(void);
+ enum hab_status (*authenticate_container)(uint8_t cid, long ivt_offset, void **start,
+ size_t *bytes, hab_loader_callback_f_t loader, uint32_t srkmask, int skip_dcd);
+};
+
+struct hab_rvt_api *g_hab_rvt_api = (struct hab_rvt_api *)HAB_RVT_BASE;
+
+/*******************************************************************************
+ * Handler for servicing HAB SMC calls
+ ******************************************************************************/
+int imx_hab_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4)
+{
+ switch (x1) {
+ case IMX_SIP_HAB_ENTRY:
+ return g_hab_rvt_api->entry();
+ case IMX_SIP_HAB_EXIT:
+ return g_hab_rvt_api->exit();
+ case IMX_SIP_HAB_CHECK_TARGET:
+ return g_hab_rvt_api->check_target((enum hab_target)x2,
+ (const void *)x3, (size_t)x4);
+ case IMX_SIP_HAB_AUTH_IMG:
+ return (unsigned long)g_hab_rvt_api->authenticate_image(HAB_CID_ATF,
+ x2, (void **)x3, (size_t *)x4, NULL);
+ case IMX_SIP_HAB_REPORT_EVENT:
+ return g_hab_rvt_api->report_event(HAB_FAILURE,
+ (uint32_t)x2, (uint8_t *)x3, (size_t *)x4);
+ case IMX_SIP_HAB_REPORT_STATUS:
+ return g_hab_rvt_api->report_status((enum hab_config *)x2,
+ (enum hab_state *)x3);
+ case IMX_SIP_HAB_FAILSAFE:
+ g_hab_rvt_api->failsafe();
+ break;
+ case IMX_SIP_HAB_AUTH_IMG_NO_DCD:
+ return (unsigned long)g_hab_rvt_api->authenticate_image_no_dcd(
+ HAB_CID_ATF, x2, (void **)x3, (size_t *)x4, NULL);
+ case IMX_SIP_HAB_GET_VERSION:
+ return g_hab_rvt_api->get_version();
+ default:
+ return SMC_UNK;
+ };
+
+ return SMC_OK;
+}
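A hypothetical sketch of how a non-secure caller (U-Boot or Linux style) could reach this handler through the SMCCC SiP interface. Only the x1 sub-command routing comes from the handler above; the IMX_SIP_HAB function ID and sub-command values shown here are assumptions for illustration, arm_smccc_smc() is the standard SMCCC helper, and the config/state buffers must live in memory that the ROM HAB code can access.

#include <linux/arm-smccc.h>

#define IMX_SIP_HAB			0xc2000007UL	/* assumed SiP function ID */
#define IMX_SIP_HAB_REPORT_STATUS	6UL		/* assumed sub-command value */

static int example_hab_report_status(void)
{
	struct arm_smccc_res res;
	unsigned int config = 0, state = 0;

	/* x1 selects the HAB operation, x2/x3 carry the buffer addresses. */
	arm_smccc_smc(IMX_SIP_HAB, IMX_SIP_HAB_REPORT_STATUS,
		      (unsigned long)&config, (unsigned long)&state,
		      0, 0, 0, 0, &res);

	return (int)res.a0;	/* enum hab_status reported by the ROM */
}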
diff --git a/plat/imx/imx8m/include/ddrc.h b/plat/imx/imx8m/include/ddrc.h
new file mode 100644
index 0000000000..55af3ff0c3
--- /dev/null
+++ b/plat/imx/imx8m/include/ddrc.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2019-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_DDRC_H
+#define IMX_DDRC_H
+
+#define DDRC_IPS_BASE_ADDR(X) (0x3d400000 + ((X) * 0x2000000))
+#define DDRC_DDR_SS_GPR0 0x3d000000
+
+/* DWC DDR uMCTL2 register offsets */
+/**********************************/
+#define DDRC_MSTR(X) (DDRC_IPS_BASE_ADDR(X) + 0x00)
+#define DDRC_STAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x04)
+#define DDRC_MSTR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x08)
+#define DDRC_MRCTRL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x10)
+#define DDRC_MRCTRL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x14)
+#define DDRC_MRSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x18)
+#define DDRC_MRCTRL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c)
+#define DDRC_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x20)
+#define DDRC_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x24)
+#define DDRC_MSTR2(X) (DDRC_IPS_BASE_ADDR(X) + 0x28)
+#define DDRC_PWRCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x30)
+#define DDRC_PWRTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x34)
+#define DDRC_HWLPCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x38)
+#define DDRC_HWFFCCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x3c)
+#define DDRC_HWFFCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x40)
+#define DDRC_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x50)
+#define DDRC_RFSHCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x54)
+#define DDRC_RFSHCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x58)
+#define DDRC_RFSHCTL3(X) (DDRC_IPS_BASE_ADDR(X) + 0x60)
+#define DDRC_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x64)
+#define DDRC_ECCCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x70)
+#define DDRC_ECCCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x74)
+#define DDRC_ECCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x78)
+#define DDRC_ECCCLR(X) (DDRC_IPS_BASE_ADDR(X) + 0x7c)
+#define DDRC_ECCERRCNT(X) (DDRC_IPS_BASE_ADDR(X) + 0x80)
+#define DDRC_ECCCADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0x84)
+#define DDRC_ECCCADDR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x88)
+#define DDRC_ECCCSYN0(X) (DDRC_IPS_BASE_ADDR(X) + 0x8c)
+#define DDRC_ECCCSYN1(X) (DDRC_IPS_BASE_ADDR(X) + 0x90)
+#define DDRC_ECCCSYN2(X) (DDRC_IPS_BASE_ADDR(X) + 0x94)
+#define DDRC_ECCBITMASK0(X) (DDRC_IPS_BASE_ADDR(X) + 0x98)
+#define DDRC_ECCBITMASK1(X) (DDRC_IPS_BASE_ADDR(X) + 0x9c)
+#define DDRC_ECCBITMASK2(X) (DDRC_IPS_BASE_ADDR(X) + 0xa0)
+#define DDRC_ECCUADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0xa4)
+#define DDRC_ECCUADDR1(X) (DDRC_IPS_BASE_ADDR(X) + 0xa8)
+#define DDRC_ECCUSYN0(X) (DDRC_IPS_BASE_ADDR(X) + 0xac)
+#define DDRC_ECCUSYN1(X) (DDRC_IPS_BASE_ADDR(X) + 0xb0)
+#define DDRC_ECCUSYN2(X) (DDRC_IPS_BASE_ADDR(X) + 0xb4)
+#define DDRC_ECCPOISONADDR0(X) (DDRC_IPS_BASE_ADDR(X) + 0xb8)
+#define DDRC_ECCPOISONADDR1(X) (DDRC_IPS_BASE_ADDR(X) + 0xbc)
+#define DDRC_CRCPARCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0xc0)
+#define DDRC_CRCPARCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0xc4)
+#define DDRC_CRCPARCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0xc8)
+#define DDRC_CRCPARSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0xcc)
+#define DDRC_INIT0(X) (DDRC_IPS_BASE_ADDR(X) + 0xd0)
+#define DDRC_INIT1(X) (DDRC_IPS_BASE_ADDR(X) + 0xd4)
+#define DDRC_INIT2(X) (DDRC_IPS_BASE_ADDR(X) + 0xd8)
+#define DDRC_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0xdc)
+#define DDRC_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0xe0)
+#define DDRC_INIT5(X) (DDRC_IPS_BASE_ADDR(X) + 0xe4)
+#define DDRC_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0xe8)
+#define DDRC_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0xec)
+#define DDRC_DIMMCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf0)
+#define DDRC_RANKCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf4)
+#define DDRC_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x100)
+#define DDRC_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x104)
+#define DDRC_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x108)
+#define DDRC_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x10c)
+#define DDRC_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x110)
+#define DDRC_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x114)
+#define DDRC_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x118)
+#define DDRC_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x11c)
+#define DDRC_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x120)
+#define DDRC_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x124)
+#define DDRC_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x128)
+#define DDRC_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x12c)
+#define DDRC_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x130)
+#define DDRC_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x134)
+#define DDRC_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x138)
+#define DDRC_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x13C)
+#define DDRC_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x140)
+#define DDRC_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x144)
+
+#define DDRC_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x180)
+#define DDRC_ZQCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x184)
+#define DDRC_ZQCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x188)
+#define DDRC_ZQSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x18c)
+#define DDRC_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x190)
+#define DDRC_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x194)
+#define DDRC_DFILPCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x198)
+#define DDRC_DFILPCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x19c)
+#define DDRC_DFIUPD0(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a0)
+#define DDRC_DFIUPD1(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a4)
+#define DDRC_DFIUPD2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1a8)
+#define DDRC_DFIMISC(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b0)
+#define DDRC_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b4)
+#define DDRC_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x1b8)
+#define DDRC_DFISTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x1bc)
+
+#define DDRC_DBICTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c0)
+#define DDRC_DFIPHYMSTR(X) (DDRC_IPS_BASE_ADDR(X) + 0x1c4)
+#define DDRC_TRAINCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d0)
+#define DDRC_TRAINCTL1(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d4)
+#define DDRC_TRAINCTL2(X) (DDRC_IPS_BASE_ADDR(X) + 0x1d8)
+#define DDRC_TRAINSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x1dc)
+#define DDRC_ADDRMAP0(X) (DDRC_IPS_BASE_ADDR(X) + 0x200)
+#define DDRC_ADDRMAP1(X) (DDRC_IPS_BASE_ADDR(X) + 0x204)
+#define DDRC_ADDRMAP2(X) (DDRC_IPS_BASE_ADDR(X) + 0x208)
+#define DDRC_ADDRMAP3(X) (DDRC_IPS_BASE_ADDR(X) + 0x20c)
+#define DDRC_ADDRMAP4(X) (DDRC_IPS_BASE_ADDR(X) + 0x210)
+#define DDRC_ADDRMAP5(X) (DDRC_IPS_BASE_ADDR(X) + 0x214)
+#define DDRC_ADDRMAP6(X) (DDRC_IPS_BASE_ADDR(X) + 0x218)
+#define DDRC_ADDRMAP7(X) (DDRC_IPS_BASE_ADDR(X) + 0x21c)
+#define DDRC_ADDRMAP8(X) (DDRC_IPS_BASE_ADDR(X) + 0x220)
+#define DDRC_ADDRMAP9(X) (DDRC_IPS_BASE_ADDR(X) + 0x224)
+#define DDRC_ADDRMAP10(X) (DDRC_IPS_BASE_ADDR(X) + 0x228)
+#define DDRC_ADDRMAP11(X) (DDRC_IPS_BASE_ADDR(X) + 0x22c)
+
+#define DDRC_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x240)
+#define DDRC_ODTMAP(X) (DDRC_IPS_BASE_ADDR(X) + 0x244)
+#define DDRC_SCHED(X) (DDRC_IPS_BASE_ADDR(X) + 0x250)
+#define DDRC_SCHED1(X) (DDRC_IPS_BASE_ADDR(X) + 0x254)
+#define DDRC_PERFHPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x25c)
+#define DDRC_PERFLPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x264)
+#define DDRC_PERFWR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x26c)
+#define DDRC_PERFVPR1(X) (DDRC_IPS_BASE_ADDR(X) + 0x274)
+
+#define DDRC_PERFVPW1(X) (DDRC_IPS_BASE_ADDR(X) + 0x278)
+
+#define DDRC_DQMAP0(X) (DDRC_IPS_BASE_ADDR(X) + 0x280)
+#define DDRC_DQMAP1(X) (DDRC_IPS_BASE_ADDR(X) + 0x284)
+#define DDRC_DQMAP2(X) (DDRC_IPS_BASE_ADDR(X) + 0x288)
+#define DDRC_DQMAP3(X) (DDRC_IPS_BASE_ADDR(X) + 0x28c)
+#define DDRC_DQMAP4(X) (DDRC_IPS_BASE_ADDR(X) + 0x290)
+#define DDRC_DQMAP5(X) (DDRC_IPS_BASE_ADDR(X) + 0x294)
+#define DDRC_DBG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x300)
+#define DDRC_DBG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x304)
+#define DDRC_DBGCAM(X) (DDRC_IPS_BASE_ADDR(X) + 0x308)
+#define DDRC_DBGCMD(X) (DDRC_IPS_BASE_ADDR(X) + 0x30c)
+#define DDRC_DBGSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x310)
+
+#define DDRC_SWCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x320)
+#define DDRC_SWSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x324)
+#define DDRC_OCPARCFG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x330)
+#define DDRC_OCPARCFG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x334)
+#define DDRC_OCPARCFG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x338)
+#define DDRC_OCPARCFG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x33c)
+#define DDRC_OCPARSTAT0(X) (DDRC_IPS_BASE_ADDR(X) + 0x340)
+#define DDRC_OCPARSTAT1(X) (DDRC_IPS_BASE_ADDR(X) + 0x344)
+#define DDRC_OCPARWLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x348)
+#define DDRC_OCPARWLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x34c)
+#define DDRC_OCPARWLOG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x350)
+#define DDRC_OCPARAWLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x354)
+#define DDRC_OCPARAWLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x358)
+#define DDRC_OCPARRLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x35c)
+#define DDRC_OCPARRLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x360)
+#define DDRC_OCPARARLOG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x364)
+#define DDRC_OCPARARLOG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x368)
+#define DDRC_POISONCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x36C)
+#define DDRC_POISONSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x370)
+#define DDRC_ADVECCINDEX(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+#define DDRC_ADVECCSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+#define DDRC_ECCPOISONPAT0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+#define DDRC_ECCPOISONPAT1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+#define DDRC_ECCPOISONPAT2(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+#define DDRC_HIFCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0x3)
+
+#define DDRC_PSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3fc)
+#define DDRC_PCCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x400)
+#define DDRC_PCFGR_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x404)
+#define DDRC_PCFGR_1(X) (DDRC_IPS_BASE_ADDR(X) + 1 * 0xb0 + 0x404)
+#define DDRC_PCFGR_2(X) (DDRC_IPS_BASE_ADDR(X) + 2 * 0xb0 + 0x404)
+#define DDRC_PCFGR_3(X) (DDRC_IPS_BASE_ADDR(X) + 3 * 0xb0 + 0x404)
+#define DDRC_PCFGW_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x408)
+#define DDRC_PCFGW_1(X) (DDRC_IPS_BASE_ADDR(X) + 1 * 0xb0 + 0x408)
+#define DDRC_PCFGW_2(X) (DDRC_IPS_BASE_ADDR(X) + 2 * 0xb0 + 0x408)
+#define DDRC_PCFGW_3(X) (DDRC_IPS_BASE_ADDR(X) + 3 * 0xb0 + 0x408)
+#define DDRC_PCFGC_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x40c)
+#define DDRC_PCFGIDMASKCH(X) (DDRC_IPS_BASE_ADDR(X) + 0x410)
+#define DDRC_PCFGIDVALUECH(X) (DDRC_IPS_BASE_ADDR(X) + 0x414)
+#define DDRC_PCTRL_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x490)
+#define DDRC_PCTRL_1(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 1 * 0xb0)
+#define DDRC_PCTRL_2(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 2 * 0xb0)
+#define DDRC_PCTRL_3(X) (DDRC_IPS_BASE_ADDR(X) + 0x490 + 3 * 0xb0)
+#define DDRC_PCFGQOS0_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x494)
+#define DDRC_PCFGQOS1_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x498)
+#define DDRC_PCFGWQOS0_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x49c)
+#define DDRC_PCFGWQOS1_0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4a0)
+#define DDRC_SARBASE0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf04)
+#define DDRC_SARSIZE0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf08)
+#define DDRC_SBRCTL(X) (DDRC_IPS_BASE_ADDR(X) + 0xf24)
+#define DDRC_SBRSTAT(X) (DDRC_IPS_BASE_ADDR(X) + 0xf28)
+#define DDRC_SBRWDATA0(X) (DDRC_IPS_BASE_ADDR(X) + 0xf2c)
+#define DDRC_SBRWDATA1(X) (DDRC_IPS_BASE_ADDR(X) + 0xf30)
+#define DDRC_PDCH(X) (DDRC_IPS_BASE_ADDR(X) + 0xf34)
+
+/* SHADOW registers */
+#define DDRC_FREQ1_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x2020)
+#define DDRC_FREQ1_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x2024)
+#define DDRC_FREQ1_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2050)
+#define DDRC_FREQ1_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x2064)
+#define DDRC_FREQ1_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x20dc)
+#define DDRC_FREQ1_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x20e0)
+#define DDRC_FREQ1_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0x20e8)
+#define DDRC_FREQ1_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x20ec)
+#define DDRC_FREQ1_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2100)
+#define DDRC_FREQ1_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x2104)
+#define DDRC_FREQ1_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x2108)
+#define DDRC_FREQ1_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x210c)
+#define DDRC_FREQ1_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x2110)
+#define DDRC_FREQ1_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x2114)
+#define DDRC_FREQ1_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x2118)
+#define DDRC_FREQ1_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x211c)
+#define DDRC_FREQ1_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x2120)
+#define DDRC_FREQ1_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x2124)
+#define DDRC_FREQ1_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x2128)
+#define DDRC_FREQ1_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x212c)
+#define DDRC_FREQ1_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x2130)
+#define DDRC_FREQ1_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x2134)
+#define DDRC_FREQ1_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x2138)
+#define DDRC_FREQ1_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x213C)
+#define DDRC_FREQ1_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x2140)
+#define DDRC_FREQ1_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x2144)
+#define DDRC_FREQ1_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2180)
+#define DDRC_FREQ1_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x2190)
+#define DDRC_FREQ1_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x2194)
+#define DDRC_FREQ1_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b4)
+#define DDRC_FREQ1_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b8)
+#define DDRC_FREQ1_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x2240)
+
+#define DDRC_FREQ2_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x3020)
+#define DDRC_FREQ2_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x3024)
+#define DDRC_FREQ2_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3050)
+#define DDRC_FREQ2_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x3064)
+#define DDRC_FREQ2_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x30dc)
+#define DDRC_FREQ2_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x30e0)
+#define DDRC_FREQ2_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0x30e8)
+#define DDRC_FREQ2_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x30ec)
+#define DDRC_FREQ2_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3100)
+#define DDRC_FREQ2_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3104)
+#define DDRC_FREQ2_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x3108)
+#define DDRC_FREQ2_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x310c)
+#define DDRC_FREQ2_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x3110)
+#define DDRC_FREQ2_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x3114)
+#define DDRC_FREQ2_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x3118)
+#define DDRC_FREQ2_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x311c)
+#define DDRC_FREQ2_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x3120)
+#define DDRC_FREQ2_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x3124)
+#define DDRC_FREQ2_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x3128)
+#define DDRC_FREQ2_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x312c)
+#define DDRC_FREQ2_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x3130)
+#define DDRC_FREQ2_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x3134)
+#define DDRC_FREQ2_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x3138)
+#define DDRC_FREQ2_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x313C)
+#define DDRC_FREQ2_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x3140)
+#define DDRC_FREQ2_DRAMTMG17(X) (DDRC_IPS_BASE_ADDR(X) + 0x3144)
+#define DDRC_FREQ2_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3180)
+#define DDRC_FREQ2_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x3190)
+#define DDRC_FREQ2_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x3194)
+#define DDRC_FREQ2_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x31b4)
+#define DDRC_FREQ2_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x31b8)
+#define DDRC_FREQ2_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x3240)
+
+#define DDRC_FREQ3_DERATEEN(X) (DDRC_IPS_BASE_ADDR(X) + 0x4020)
+#define DDRC_FREQ3_DERATEINT(X) (DDRC_IPS_BASE_ADDR(X) + 0x4024)
+#define DDRC_FREQ3_RFSHCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4050)
+#define DDRC_FREQ3_RFSHTMG(X) (DDRC_IPS_BASE_ADDR(X) + 0x4064)
+#define DDRC_FREQ3_INIT3(X) (DDRC_IPS_BASE_ADDR(X) + 0x40dc)
+#define DDRC_FREQ3_INIT4(X) (DDRC_IPS_BASE_ADDR(X) + 0x40e0)
+#define DDRC_FREQ3_INIT6(X) (DDRC_IPS_BASE_ADDR(X) + 0x40e8)
+#define DDRC_FREQ3_INIT7(X) (DDRC_IPS_BASE_ADDR(X) + 0x40ec)
+#define DDRC_FREQ3_DRAMTMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4100)
+#define DDRC_FREQ3_DRAMTMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x4104)
+#define DDRC_FREQ3_DRAMTMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x4108)
+#define DDRC_FREQ3_DRAMTMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x410c)
+#define DDRC_FREQ3_DRAMTMG4(X) (DDRC_IPS_BASE_ADDR(X) + 0x4110)
+#define DDRC_FREQ3_DRAMTMG5(X) (DDRC_IPS_BASE_ADDR(X) + 0x4114)
+#define DDRC_FREQ3_DRAMTMG6(X) (DDRC_IPS_BASE_ADDR(X) + 0x4118)
+#define DDRC_FREQ3_DRAMTMG7(X) (DDRC_IPS_BASE_ADDR(X) + 0x411c)
+#define DDRC_FREQ3_DRAMTMG8(X) (DDRC_IPS_BASE_ADDR(X) + 0x4120)
+#define DDRC_FREQ3_DRAMTMG9(X) (DDRC_IPS_BASE_ADDR(X) + 0x4124)
+#define DDRC_FREQ3_DRAMTMG10(X) (DDRC_IPS_BASE_ADDR(X) + 0x4128)
+#define DDRC_FREQ3_DRAMTMG11(X) (DDRC_IPS_BASE_ADDR(X) + 0x412c)
+#define DDRC_FREQ3_DRAMTMG12(X) (DDRC_IPS_BASE_ADDR(X) + 0x4130)
+#define DDRC_FREQ3_DRAMTMG13(X) (DDRC_IPS_BASE_ADDR(X) + 0x4134)
+#define DDRC_FREQ3_DRAMTMG14(X) (DDRC_IPS_BASE_ADDR(X) + 0x4138)
+#define DDRC_FREQ3_DRAMTMG15(X) (DDRC_IPS_BASE_ADDR(X) + 0x413C)
+#define DDRC_FREQ3_DRAMTMG16(X) (DDRC_IPS_BASE_ADDR(X) + 0x4140)
+
+#define DDRC_FREQ3_ZQCTL0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4180)
+#define DDRC_FREQ3_DFITMG0(X) (DDRC_IPS_BASE_ADDR(X) + 0x4190)
+#define DDRC_FREQ3_DFITMG1(X) (DDRC_IPS_BASE_ADDR(X) + 0x4194)
+#define DDRC_FREQ3_DFITMG2(X) (DDRC_IPS_BASE_ADDR(X) + 0x41b4)
+#define DDRC_FREQ3_DFITMG3(X) (DDRC_IPS_BASE_ADDR(X) + 0x41b8)
+#define DDRC_FREQ3_ODTCFG(X) (DDRC_IPS_BASE_ADDR(X) + 0x4240)
+#define DDRC_DFITMG0_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2190)
+#define DDRC_DFITMG1_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2194)
+#define DDRC_DFITMG2_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b4)
+#define DDRC_DFITMG3_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x21b8)
+#define DDRC_ODTCFG_SHADOW(X) (DDRC_IPS_BASE_ADDR(X) + 0x2240)
+
+#define DRC_PERF_MON_BASE_ADDR(X) (0x3d800000 + ((X) * 0x2000000))
+#define DRC_PERF_MON_CNT0_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x0)
+#define DRC_PERF_MON_CNT1_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x4)
+#define DRC_PERF_MON_CNT2_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x8)
+#define DRC_PERF_MON_CNT3_CTL(X) (DRC_PERF_MON_BASE_ADDR(X) + 0xC)
+#define DRC_PERF_MON_CNT0_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x20)
+#define DRC_PERF_MON_CNT1_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x24)
+#define DRC_PERF_MON_CNT2_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x28)
+#define DRC_PERF_MON_CNT3_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x2C)
+#define DRC_PERF_MON_DPCR_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x30)
+#define DRC_PERF_MON_MRR0_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x40)
+#define DRC_PERF_MON_MRR1_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x44)
+#define DRC_PERF_MON_MRR2_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x48)
+#define DRC_PERF_MON_MRR3_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x4C)
+#define DRC_PERF_MON_MRR4_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x50)
+#define DRC_PERF_MON_MRR5_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x54)
+#define DRC_PERF_MON_MRR6_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x58)
+#define DRC_PERF_MON_MRR7_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x5C)
+#define DRC_PERF_MON_MRR8_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x60)
+#define DRC_PERF_MON_MRR9_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x64)
+#define DRC_PERF_MON_MRR10_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x68)
+#define DRC_PERF_MON_MRR11_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x6C)
+#define DRC_PERF_MON_MRR12_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x70)
+#define DRC_PERF_MON_MRR13_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x74)
+#define DRC_PERF_MON_MRR14_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x78)
+#define DRC_PERF_MON_MRR15_DAT(X) (DRC_PERF_MON_BASE_ADDR(X) + 0x7C)
+
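+/*
+ * The DDR PHY configuration registers are accessed through a 32-bit APB
+ * window: 'addr' is a register index, so the byte offset from
+ * IMX_DDRPHY_BASE is 4 * addr.
+ */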
+#define dwc_ddrphy_apb_rd(addr) mmio_read_32(IMX_DDRPHY_BASE + 4 * (addr))
+#define dwc_ddrphy_apb_wr(addr, val) mmio_write_32(IMX_DDRPHY_BASE + 4 * (addr), val)
+
+#endif /* IMX_DDRC_H */
diff --git a/plat/imx/imx8m/include/dram.h b/plat/imx/imx8m/include/dram.h
new file mode 100644
index 0000000000..719c390637
--- /dev/null
+++ b/plat/imx/imx8m/include/dram.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DRAM_H
+#define DRAM_H
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <lib/utils_def.h>
+
+#include <ddrc.h>
+#include <platform_def.h>
+
+#define DDRC_LPDDR4 BIT(5)
+#define DDRC_DDR4 BIT(4)
+#define DDRC_DDR3L BIT(0)
+#define DDR_TYPE_MASK U(0x3f)
+#define ACTIVE_RANK_MASK U(0x3)
+#define DDRC_ACTIVE_ONE_RANK U(0x1)
+#define DDRC_ACTIVE_TWO_RANK U(0x2)
+
+#define MR12 U(12)
+#define MR14 U(14)
+
+#define MAX_FSP_NUM U(3)
+
+/* reg & config param */
+struct dram_cfg_param {
+ unsigned int reg;
+ unsigned int val;
+};
+
+struct dram_timing_info {
+ /* umctl2 config */
+ struct dram_cfg_param *ddrc_cfg;
+ unsigned int ddrc_cfg_num;
+ /* ddrphy config */
+ struct dram_cfg_param *ddrphy_cfg;
+ unsigned int ddrphy_cfg_num;
+ /* ddr fsp train info */
+ struct dram_fsp_msg *fsp_msg;
+ unsigned int fsp_msg_num;
+ /* ddr phy trained CSR */
+ struct dram_cfg_param *ddrphy_trained_csr;
+ unsigned int ddrphy_trained_csr_num;
+ /* ddr phy PIE */
+ struct dram_cfg_param *ddrphy_pie;
+ unsigned int ddrphy_pie_num;
+ /* initialized fsp table */
+ unsigned int fsp_table[4];
+};
+
+struct dram_info {
+ int dram_type;
+ unsigned int num_rank;
+ uint32_t num_fsp;
+ int current_fsp;
+ int boot_fsp;
+ bool bypass_mode;
+ struct dram_timing_info *timing_info;
+ /* mr, emr, emr2, emr3, mr11, mr12, mr22, mr14 */
+ uint32_t mr_table[3][8];
+	/* used by the workaround for the rank-to-rank issue */
+ uint32_t rank_setting[3][3];
+};
+
+extern struct dram_info dram_info;
+
+void dram_info_init(unsigned long dram_timing_base);
+void dram_umctl2_init(struct dram_timing_info *timing);
+void dram_phy_init(struct dram_timing_info *timing);
+
+/* dram retention */
+void dram_enter_retention(void);
+void dram_exit_retention(void);
+
+void dram_clock_switch(unsigned int target_drate, bool bypass_mode);
+
+/* dram frequency change */
+void lpddr4_swffc(struct dram_info *info, unsigned int init_fsp, unsigned int fsp_index);
+void ddr4_swffc(struct dram_info *dram_info, unsigned int pstate);
+
+#endif /* DRAM_H */
diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h
index 29b8ecf93a..8eb3e06bca 100644
--- a/plat/imx/imx8m/include/gpc.h
+++ b/plat/imx/imx8m/include/gpc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2018-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,6 +24,7 @@
#define COREx_LPM_PUP(core_id) ((core_id) < 2 ? (1 << ((core_id) * 2 + 9)) : (1 << ((core_id) * 2 + 21)))
#define SLTx_CFG(n) ((SLT0_CFG + ((n) * 4)))
#define SLT_COREx_PUP(core_id) (0x2 << ((core_id) * 2))
+#define SLT_COREx_PUP_ACK(core_id) ((core_id) < 2 ? (1 << ((core_id) + 16)) : (1 << ((core_id) + 27)))
#define IMR_MASK_ALL 0xffffffff
@@ -54,6 +55,11 @@ struct imx_pwr_domain {
bool always_on;
};
+struct pll_override {
+ uint32_t reg;
+ uint32_t override_mask;
+};
+
DECLARE_BAKERY_LOCK(gpc_lock);
/* function declare */
@@ -69,5 +75,12 @@ void imx_set_sys_wakeup(unsigned int last_core, bool pdn);
void imx_set_sys_lpm(unsigned last_core, bool retention);
void imx_set_rbc_count(void);
void imx_clear_rbc_count(void);
+void imx_anamix_override(bool enter);
+void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on);
+
+#if defined(PLAT_imx8mq)
+void imx_gpc_set_a53_core_awake(uint32_t core_id);
+void imx_gpc_core_wake(uint32_t cpumask);
+#endif
#endif /*IMX8M_GPC_H */
diff --git a/plat/imx/imx8m/include/imx8m_ccm.h b/plat/imx/imx8m/include/imx8m_ccm.h
new file mode 100644
index 0000000000..acbd135cb3
--- /dev/null
+++ b/plat/imx/imx8m/include/imx8m_ccm.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2023, Pengutronix. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8M_CCM_H
+#define IMX8M_CCM_H
+
+unsigned int imx8m_uart_get_base(void);
+
+#endif /* IMX8M_CCM_H */
diff --git a/plat/imx/imx8m/include/imx8m_csu.h b/plat/imx/imx8m/include/imx8m_csu.h
new file mode 100644
index 0000000000..3851e91359
--- /dev/null
+++ b/plat/imx/imx8m/include/imx8m_csu.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2020-2022 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX_CSU_H
+#define IMX_CSU_H
+
+#include <lib/utils_def.h>
+
+#include <platform_def.h>
+
+#define CSU_SEC_LEVEL_0 0xff
+#define CSU_SEC_LEVEL_1 0xbb
+#define CSU_SEC_LEVEL_2 0x3f
+#define CSU_SEC_LEVEL_3 0x3b
+#define CSU_SEC_LEVEL_4 0x33
+#define CSU_SEC_LEVEL_5 0x22
+#define CSU_SEC_LEVEL_6 0x03
+#define CSU_SEC_LEVEL_7 0x0
+
+#define SEC_ACCESS 0x0
+#define NON_SEC_ACCESS 0x1
+
+#define LOCKED 0x1
+#define UNLOCKED 0x0
+
+#define CSLx_REG(x) (IMX_CSU_BASE + ((x) / 2) * 4)
+#define CSLx_LOCK(x) ((0x1 << (((x) % 2) * 16 + 8)))
+#define CSLx_CFG(x, n) ((x) << (((n) % 2) * 16))
+
+#define CSU_HP_REG(x) (IMX_CSU_BASE + (((x) / 16) * 4) + 0x200)
+#define CSU_HP_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1)))
+#define CSU_HP_CFG(x, n) ((x) << (((n) % 16) * 2))
+
+#define CSU_SA_REG(x) (IMX_CSU_BASE + (((x) / 16) * 4) + 0x218)
+#define CSU_SA_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1)))
+#define CSU_SA_CFG(x, n) ((x) << (((n) % 16) * 2))
+
+#define CSU_HPCONTROL_REG(x) (IMX_CSU_BASE + (((x) / 16) * 4) + 0x358)
+#define CSU_HPCONTROL_LOCK(x) ((0x1 << (((x) % 16) * 2 + 1)))
+#define CSU_HPCONTROL_CFG(x, n) ((x) << (((n) % 16) * 2))
+
+enum csu_cfg_type {
+ CSU_INVALID,
+ CSU_CSL,
+ CSU_HP,
+ CSU_SA,
+ CSU_HPCONTROL,
+};
+
+struct imx_csu_cfg {
+ enum csu_cfg_type type;
+ uint16_t idx;
+ uint16_t lock : 1;
+ uint16_t csl_level : 8;
+ uint16_t hp : 1;
+ uint16_t sa : 1;
+ uint16_t hpctrl : 1;
+};
+
+#define CSU_CSLx(i, level, lk) \
+ {CSU_CSL, .idx = (i), .csl_level = (level), .lock = (lk),}
+
+#define CSU_HPx(i, val, lk) \
+ {CSU_HP, .idx = (i), .hp = (val), .lock = (lk), }
+
+#define CSU_SA(i, val, lk) \
+ {CSU_SA, .idx = (i), .sa = (val), .lock = (lk), }
+
+#define CSU_HPCTRL(i, val, lk) \
+ {CSU_HPCONTROL, .idx = (i), .hpctrl = (val), .lock = (lk), }
+
+void imx_csu_init(const struct imx_csu_cfg *csu_cfg);
+
+#endif /* IMX_CSU_H */
diff --git a/plat/imx/imx8m/include/imx8m_measured_boot.h b/plat/imx/imx8m/include/imx8m_measured_boot.h
new file mode 100644
index 0000000000..2ec0c46463
--- /dev/null
+++ b/plat/imx/imx8m/include/imx8m_measured_boot.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2022, Linaro
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8M_MEASURED_BOOT_H
+#define IMX8M_MEASURED_BOOT_H
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+
+int imx8m_set_nt_fw_info(size_t log_size, uintptr_t *ns_log_addr);
+
+#endif /* IMX8M_MEASURED_BOOT_H */
diff --git a/plat/imx/imx8m/include/imx8m_snvs.h b/plat/imx/imx8m/include/imx8m_snvs.h
new file mode 100644
index 0000000000..799e1d558a
--- /dev/null
+++ b/plat/imx/imx8m/include/imx8m_snvs.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8M_SNVS_H
+#define IMX8M_SNVS_H
+
+void enable_snvs_privileged_access(void);
+
+#endif /* IMX8M_SNVS_H */
diff --git a/plat/imx/imx8m/include/imx_rdc.h b/plat/imx/imx8m/include/imx_rdc.h
index e25b0e6d49..a6e10a7b75 100644
--- a/plat/imx/imx8m/include/imx_rdc.h
+++ b/plat/imx/imx8m/include/imx_rdc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, NXP. All rights reserved.
+ * Copyright (c) 2019-2022 NXP. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,6 +9,7 @@
#include <lib/utils_def.h>
+#include <imx_sec_def.h>
#include <platform_def.h>
#define MDAn(x) (IMX_RDC_BASE + 0x200 + (x) * 4)
diff --git a/plat/imx/imx8qm/imx8qm_bl31_setup.c b/plat/imx/imx8qm/imx8qm_bl31_setup.c
index 4ca6a5db4b..bd7896a997 100644
--- a/plat/imx/imx8qm/imx8qm_bl31_setup.c
+++ b/plat/imx/imx8qm/imx8qm_bl31_setup.c
@@ -1,10 +1,12 @@
/*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
+#include <inttypes.h>
+#include <stdint.h>
#include <stdbool.h>
#include <platform_def.h>
@@ -17,7 +19,7 @@
#include <drivers/console.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>
-#include <lib/xlat_tables/xlat_tables.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <imx8qm_pads.h>
@@ -60,7 +62,7 @@ static entry_point_info_t bl33_image_ep_info;
#error "Provide proper UART number in IMX_DEBUG_UART"
#endif
-const static int imx8qm_cci_map[] = {
+static const int imx8qm_cci_map[] = {
CLUSTER0_CCI_SLVAE_IFACE,
CLUSTER1_CCI_SLVAE_IFACE
};
@@ -261,14 +263,14 @@ void mx8_partition_resources(void)
err = sc_rm_get_memreg_info(ipc_handle, mr, &start, &end);
if (err)
ERROR("Memreg get info failed, %u\n", mr);
- NOTICE("Memreg %u 0x%llx -- 0x%llx\n", mr, start, end);
+ NOTICE("Memreg %u 0x%" PRIx64 " -- 0x%" PRIx64 "\n", mr, start, end);
if (BL31_BASE >= start && (BL31_LIMIT - 1) <= end) {
mr_record = mr; /* Record the mr for ATF running */
} else {
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx, \
- err %d\n", start, end, err);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 ", \
+ err %d\n", start, end, err);
}
}
}
@@ -280,23 +282,23 @@ void mx8_partition_resources(void)
if ((BL31_LIMIT - 1) < end) {
err = sc_rm_memreg_alloc(ipc_handle, &mr, BL31_LIMIT, end);
if (err)
- ERROR("sc_rm_memreg_alloc failed, 0x%llx -- 0x%llx\n",
- (sc_faddr_t)BL31_LIMIT, end);
+ ERROR("sc_rm_memreg_alloc failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ (sc_faddr_t)BL31_LIMIT, end);
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx\n",
- (sc_faddr_t)BL31_LIMIT, end);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ (sc_faddr_t)BL31_LIMIT, end);
}
if (start < (BL31_BASE - 1)) {
err = sc_rm_memreg_alloc(ipc_handle, &mr, start, BL31_BASE - 1);
if (err)
- ERROR("sc_rm_memreg_alloc failed, 0x%llx -- 0x%llx\n",
- start, (sc_faddr_t)BL31_BASE - 1);
+ ERROR("sc_rm_memreg_alloc failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ start, (sc_faddr_t)BL31_BASE - 1);
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx\n",
- start, (sc_faddr_t)BL31_BASE - 1);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ start, (sc_faddr_t)BL31_BASE - 1);
}
}
diff --git a/plat/imx/imx8qm/imx8qm_psci.c b/plat/imx/imx8qm/imx8qm_psci.c
index bdba37c6eb..dcc502ff89 100644
--- a/plat/imx/imx8qm/imx8qm_psci.c
+++ b/plat/imx/imx8qm/imx8qm_psci.c
@@ -26,7 +26,7 @@
#define SYSTEM_PWR_STATE(state) \
((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
-const static int ap_core_index[PLATFORM_CORE_COUNT] = {
+static const int ap_core_index[PLATFORM_CORE_COUNT] = {
SC_R_A53_0, SC_R_A53_1, SC_R_A53_2,
SC_R_A53_3, SC_R_A72_0, SC_R_A72_1,
};
diff --git a/plat/imx/imx8qm/platform.mk b/plat/imx/imx8qm/platform.mk
index f35fa00203..c57edbe2af 100644
--- a/plat/imx/imx8qm/platform.mk
+++ b/plat/imx/imx8qm/platform.mk
@@ -1,9 +1,12 @@
#
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
PLAT_INCLUDES := -Iplat/imx/imx8qm/include \
-Iplat/imx/common/include \
@@ -23,11 +26,10 @@ BL31_SOURCES += plat/imx/common/lpuart_console.S \
plat/imx/common/imx8_psci.c \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_sip_handler.c \
- lib/xlat_tables/aarch64/xlat_tables.c \
- lib/xlat_tables/xlat_tables_common.c \
lib/cpus/aarch64/cortex_a53.S \
lib/cpus/aarch64/cortex_a72.S \
drivers/arm/cci/cci.c \
+ ${XLAT_TABLES_LIB_SRCS} \
${IMX_GIC_SOURCES} \
include plat/imx/common/sci/sci_api.mk
diff --git a/plat/imx/imx8qx/imx8qx_bl31_setup.c b/plat/imx/imx8qx/imx8qx_bl31_setup.c
index 3ff5400170..13e80fb371 100644
--- a/plat/imx/imx8qx/imx8qx_bl31_setup.c
+++ b/plat/imx/imx8qx/imx8qx_bl31_setup.c
@@ -1,11 +1,13 @@
/*
- * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
+#include <inttypes.h>
#include <stdbool.h>
+#include <stdint.h>
#include <platform_def.h>
@@ -17,7 +19,7 @@
#include <drivers/console.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>
-#include <lib/xlat_tables/xlat_tables.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <imx8qx_pads.h>
@@ -49,6 +51,16 @@ static entry_point_info_t bl33_image_ep_info;
#define IMX_PAD_UART_RX SC_P_UART0_RX
#define IMX_PAD_UART_TX SC_P_UART0_TX
+#elif defined(IMX_USE_UART1)
+#define UART_PAD_CTRL (PADRING_IFMUX_EN_MASK | PADRING_GP_EN_MASK | \
+ (SC_PAD_CONFIG_OUT_IN << PADRING_CONFIG_SHIFT) | \
+ (SC_PAD_ISO_OFF << PADRING_LPCONFIG_SHIFT) | \
+ (SC_PAD_28FDSOI_DSE_DV_LOW << PADRING_DSE_SHIFT) | \
+ (SC_PAD_28FDSOI_PS_PD << PADRING_PULL_SHIFT))
+#define IMX_RES_UART SC_R_UART_1
+#define IMX_PAD_UART_RX SC_P_UART1_RX
+#define IMX_PAD_UART_TX SC_P_UART1_TX
+
/*
* On Toradex Colibri i.MX8QXP UART3 on the FLEXCAN2.
* Use custom pad control for this
@@ -238,14 +250,14 @@ void imx8_partition_resources(void)
if (err)
ERROR("Memreg get info failed, %u\n", mr);
- NOTICE("Memreg %u 0x%llx -- 0x%llx\n", mr, start, end);
+ NOTICE("Memreg %u 0x%" PRIx64 " -- 0x%" PRIx64 "\n", mr, start, end);
if (BL31_BASE >= start && (BL31_LIMIT - 1) <= end) {
mr_record = mr; /* Record the mr for ATF running */
} else {
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx, \
- err %d\n", start, end, err);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 ", \
+ err %d\n", start, end, err);
}
}
}
@@ -257,23 +269,23 @@ void imx8_partition_resources(void)
if ((BL31_LIMIT - 1) < end) {
err = sc_rm_memreg_alloc(ipc_handle, &mr, BL31_LIMIT, end);
if (err)
- ERROR("sc_rm_memreg_alloc failed, 0x%llx -- 0x%llx\n",
- (sc_faddr_t)BL31_LIMIT, end);
+ ERROR("sc_rm_memreg_alloc failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ (sc_faddr_t)BL31_LIMIT, end);
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx\n",
- (sc_faddr_t)BL31_LIMIT, end);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ (sc_faddr_t)BL31_LIMIT, end);
}
if (start < (BL31_BASE - 1)) {
err = sc_rm_memreg_alloc(ipc_handle, &mr, start, BL31_BASE - 1);
if (err)
- ERROR("sc_rm_memreg_alloc failed, 0x%llx -- 0x%llx\n",
- start, (sc_faddr_t)BL31_BASE - 1);
+ ERROR("sc_rm_memreg_alloc failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ start, (sc_faddr_t)BL31_BASE - 1);
err = sc_rm_assign_memreg(ipc_handle, os_part, mr);
if (err)
- ERROR("Memreg assign failed, 0x%llx -- 0x%llx\n",
- start, (sc_faddr_t)BL31_BASE - 1);
+ ERROR("Memreg assign failed, 0x%" PRIx64 " -- 0x%" PRIx64 "\n",
+ start, (sc_faddr_t)BL31_BASE - 1);
}
}
diff --git a/plat/imx/imx8qx/imx8qx_psci.c b/plat/imx/imx8qx/imx8qx_psci.c
index aab3a2dae8..5f0556665e 100644
--- a/plat/imx/imx8qx/imx8qx_psci.c
+++ b/plat/imx/imx8qx/imx8qx_psci.c
@@ -18,7 +18,7 @@
#include "../../common/sci/imx8_mu.h"
-const static int ap_core_index[PLATFORM_CORE_COUNT] = {
+static const int ap_core_index[PLATFORM_CORE_COUNT] = {
SC_R_A35_0, SC_R_A35_1, SC_R_A35_2, SC_R_A35_3
};
diff --git a/plat/imx/imx8qx/include/platform_def.h b/plat/imx/imx8qx/include/platform_def.h
index b880e1bc73..29f6f7c7fe 100644
--- a/plat/imx/imx8qx/include/platform_def.h
+++ b/plat/imx/imx8qx/include/platform_def.h
@@ -41,6 +41,8 @@
#if defined(IMX_USE_UART0)
#define IMX_BOOT_UART_BASE 0x5a060000
+#elif defined(IMX_USE_UART1)
+#define IMX_BOOT_UART_BASE 0x5a070000
#elif defined(IMX_USE_UART3)
#define IMX_BOOT_UART_BASE 0x5a090000
#else
diff --git a/plat/imx/imx8qx/platform.mk b/plat/imx/imx8qx/platform.mk
index b25be07424..85b5f3dc42 100644
--- a/plat/imx/imx8qx/platform.mk
+++ b/plat/imx/imx8qx/platform.mk
@@ -1,9 +1,12 @@
#
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2022, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
PLAT_INCLUDES := -Iplat/imx/imx8qx/include \
-Iplat/imx/common/include \
@@ -23,9 +26,8 @@ BL31_SOURCES += plat/imx/common/lpuart_console.S \
plat/imx/common/imx_sip_svc.c \
plat/imx/common/imx_sip_handler.c \
plat/common/plat_psci_common.c \
- lib/xlat_tables/xlat_tables_common.c \
- lib/xlat_tables/aarch64/xlat_tables.c \
lib/cpus/aarch64/cortex_a35.S \
+ ${XLAT_TABLES_LIB_SRCS} \
${IMX_GIC_SOURCES} \
include plat/imx/common/sci/sci_api.mk
diff --git a/plat/imx/imx8ulp/apd_context.c b/plat/imx/imx8ulp/apd_context.c
new file mode 100644
index 0000000000..54b87958d8
--- /dev/null
+++ b/plat/imx/imx8ulp/apd_context.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include <plat_imx8.h>
+#include <xrdc.h>
+
+#define PCC_PR BIT(31)
+#define PFD_VALID_MASK U(0x40404040)
+
+#define S400_MU_BASE U(0x27020000)
+#define S400_MU_RSR (S400_MU_BASE + 0x12c)
+#define S400_MU_TRx(i) (S400_MU_BASE + 0x200 + (i) * 4)
+#define S400_MU_RRx(i) (S400_MU_BASE + 0x280 + (i) * 4)
+
+/*
+ * The PLL, CGC1, PCC, CMC, XRDC, SIM, GPIO, etc. need to be re-initialized.
+ * Initialize the PLL & PFD first, then switch the CA35 clock to the PLL for
+ * performance, and finally restore the other bus fabric clocks.
+ */
+
+extern void imx8ulp_caam_init(void);
+extern void upower_wait_resp(void);
+extern void dram_enter_retention(void);
+extern void dram_exit_retention(void);
+
+struct plat_gic_ctx imx_gicv3_ctx;
+static uint32_t cmc1_pmprot;
+static uint32_t cmc1_srie;
+
+/* TPM5: global timer */
+static uint32_t tpm5[3];
+
+static uint32_t wdog3[2];
+
+/* CGC1 PLL2 */
+uint32_t pll2[][2] = {
+ {0x292c0510, 0x0}, {0x292c0518, 0x0}, {0x292c051c, 0x0},
+ {0x292c0520, 0x0}, {0x292c0500, 0x0},
+};
+
+/* CGC1 PLL3 */
+uint32_t pll3[][2] = {
+ {0x292c0604, 0x0}, {0x292c0608, 0x0}, {0x292c060c, 0x0},
+ {0x292c0610, 0x0}, {0x292c0618, 0x0}, {0x292c061c, 0x0},
+ {0x292c0620, 0x0}, {0x292c0624, 0x0}, {0x292c0600, 0x0},
+ {0x292c0614, 0x0},
+};
+
+/* CGC1 others */
+uint32_t cgc1[][2] = {
+ {0x292c0014, 0x0}, {0x292c0034, 0x0}, {0x292c0038, 0x0},
+ {0x292c0108, 0x0}, {0x292c0208, 0x0}, {0x292c0700, 0x0},
+ {0x292c0810, 0x0}, {0x292c0900, 0x0}, {0x292c0904, 0x0},
+ {0x292c0908, 0x0}, {0x292c090c, 0x0}, {0x292c0a00, 0x0},
+};
+
+static uint32_t pcc3[61];
+static uint32_t pcc4[32];
+
+static uint32_t pcc5_0[33];
+static uint32_t pcc5_1[][2] = {
+ {0x2da70084, 0x0}, {0x2da70088, 0x0}, {0x2da7008c, 0x0},
+ {0x2da700a0, 0x0}, {0x2da700a4, 0x0}, {0x2da700a8, 0x0},
+ {0x2da700ac, 0x0}, {0x2da700b0, 0x0}, {0x2da700b4, 0x0},
+ {0x2da700bc, 0x0}, {0x2da700c0, 0x0}, {0x2da700c8, 0x0},
+ {0x2da700cc, 0x0}, {0x2da700d0, 0x0}, {0x2da700f0, 0x0},
+ {0x2da700f4, 0x0}, {0x2da700f8, 0x0}, {0x2da70108, 0x0},
+ {0x2da7010c, 0x0}, {0x2da70110, 0x0}, {0x2da70114, 0x0},
+};
+
+static uint32_t cgc2[][2] = {
+ {0x2da60014, 0x0}, {0x2da60020, 0x0}, {0x2da6003c, 0x0},
+ {0x2da60040, 0x0}, {0x2da60108, 0x0}, {0x2da60208, 0x0},
+ {0x2da60900, 0x0}, {0x2da60904, 0x0}, {0x2da60908, 0x0},
+ {0x2da60910, 0x0}, {0x2da60a00, 0x0},
+};
+
+static uint32_t pll4[][2] = {
+ {0x2da60604, 0x0}, {0x2da60608, 0x0}, {0x2da6060c, 0x0},
+ {0x2da60610, 0x0}, {0x2da60618, 0x0}, {0x2da6061c, 0x0},
+ {0x2da60620, 0x0}, {0x2da60624, 0x0}, {0x2da60600, 0x0},
+ {0x2da60614, 0x0},
+};
+
+static uint32_t lpav_sim[][2] = {
+ {0x2da50000, 0x0}, {0x2da50004, 0x0}, {0x2da50008, 0x0},
+ {0x2da5001c, 0x0}, {0x2da50020, 0x0}, {0x2da50024, 0x0},
+ {0x2da50034, 0x0},
+};
+
+#define APD_GPIO_CTRL_NUM 2
+#define LPAV_GPIO_CTRL_NUM 1
+#define GPIO_CTRL_REG_NUM 8
+#define GPIO_PIN_MAX_NUM 32
+#define GPIO_CTX(addr, num) \
+ {.base = (addr), .pin_num = (num), }
+
+struct gpio_ctx {
+ /* gpio base */
+ uintptr_t base;
+ /* port control */
+ uint32_t port_ctrl[GPIO_CTRL_REG_NUM];
+ /* GPIO ICR, Max 32 */
+ uint32_t pin_num;
+ uint32_t gpio_icr[GPIO_PIN_MAX_NUM];
+};
+
+static uint32_t gpio_ctrl_offset[GPIO_CTRL_REG_NUM] = {
+ 0xc, 0x10, 0x14, 0x18, 0x1c, 0x40, 0x54, 0x58
+};
+static struct gpio_ctx apd_gpio_ctx[APD_GPIO_CTRL_NUM] = {
+ GPIO_CTX(IMX_GPIOE_BASE, 24),
+ GPIO_CTX(IMX_GPIOF_BASE, 32),
+};
+
+static struct gpio_ctx lpav_gpio_ctx = GPIO_CTX(IMX_GPIOD_BASE, 24);
+/* iomuxc setting */
+#define IOMUXC_SECTION_NUM 8
+struct iomuxc_section {
+ uint32_t offset;
+ uint32_t reg_num;
+};
+
+struct iomuxc_section iomuxc_sections[IOMUXC_SECTION_NUM] = {
+ {.offset = IOMUXC_PTD_PCR_BASE, .reg_num = 24},
+ {.offset = IOMUXC_PTE_PCR_BASE, .reg_num = 24},
+ {.offset = IOMUXC_PTF_PCR_BASE, .reg_num = 32},
+ {.offset = IOMUXC_PSMI_BASE0, .reg_num = 10},
+ {.offset = IOMUXC_PSMI_BASE1, .reg_num = 61},
+ {.offset = IOMUXC_PSMI_BASE2, .reg_num = 12},
+ {.offset = IOMUXC_PSMI_BASE3, .reg_num = 20},
+ {.offset = IOMUXC_PSMI_BASE4, .reg_num = 75},
+};
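+/* 258 words: the sum of reg_num over the eight IOMUXC sections above */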
+static uint32_t iomuxc_ctx[258];
+
+#define PORTS_NUM 3U
+void apd_io_pad_off(void)
+{
+ unsigned int i, j;
+
+	/* power off the PTD/E/F pads; this needs to be customized for the actual use case */
+ for (i = 0; i < PORTS_NUM; i++) {
+ for (j = 0; j < iomuxc_sections[i].reg_num; j++) {
+ mmio_write_32(iomuxc_sections[i].offset + j * 4, 0);
+ }
+ }
+
+ /* disable the PTD compensation */
+ mmio_write_32(IMX_SIM1_BASE + 0x48, 0x800);
+}
+
+void iomuxc_save(void)
+{
+ unsigned int i, j;
+ unsigned int index = 0U;
+
+ for (i = 0U; i < IOMUXC_SECTION_NUM; i++) {
+ for (j = 0U; j < iomuxc_sections[i].reg_num; j++) {
+ iomuxc_ctx[index++] = mmio_read_32(iomuxc_sections[i].offset + j * 4);
+ }
+ }
+
+ apd_io_pad_off();
+}
+
+void iomuxc_restore(void)
+{
+ unsigned int i, j;
+ unsigned int index = 0U;
+
+ for (i = 0U; i < IOMUXC_SECTION_NUM; i++) {
+ for (j = 0U; j < iomuxc_sections[i].reg_num; j++) {
+ mmio_write_32(iomuxc_sections[i].offset + j * 4, iomuxc_ctx[index++]);
+ }
+ }
+}
+
+void gpio_save(struct gpio_ctx *ctx, int port_num)
+{
+ unsigned int i, j;
+
+ for (i = 0U; i < port_num; i++) {
+ /* save the port control setting */
+ for (j = 0U; j < GPIO_CTRL_REG_NUM; j++) {
+ if (j < 4U) {
+ ctx->port_ctrl[j] = mmio_read_32(ctx->base + gpio_ctrl_offset[j]);
+ /*
+				 * clear the permission setting so that the GPIO
+				 * non-secure world settings can be read.
+ */
+ mmio_write_32(ctx->base + gpio_ctrl_offset[j], 0x0);
+ } else {
+ ctx->port_ctrl[j] = mmio_read_32(ctx->base + gpio_ctrl_offset[j]);
+ }
+ }
+ /* save the gpio icr setting */
+ for (j = 0U; j < ctx->pin_num; j++) {
+ ctx->gpio_icr[j] = mmio_read_32(ctx->base + 0x80 + j * 4);
+ }
+
+ ctx++;
+ }
+}
+
+void gpio_restore(struct gpio_ctx *ctx, int port_num)
+{
+ unsigned int i, j;
+
+ for (i = 0U; i < port_num; i++) {
+ for (j = 0U; j < ctx->pin_num; j++)
+ mmio_write_32(ctx->base + 0x80 + j * 4, ctx->gpio_icr[j]);
+
+ for (j = 4U; j < GPIO_CTRL_REG_NUM; j++) {
+ mmio_write_32(ctx->base + gpio_ctrl_offset[j], ctx->port_ctrl[j]);
+ }
+
+		/* restore the permission config last */
+ for (j = 0U; j < 4; j++) {
+ mmio_write_32(ctx->base + gpio_ctrl_offset[j], ctx->port_ctrl[j]);
+ }
+
+ ctx++;
+ }
+}
+
+void cgc1_save(void)
+{
+ unsigned int i;
+
+ /* PLL2 */
+ for (i = 0U; i < ARRAY_SIZE(pll2); i++) {
+ pll2[i][1] = mmio_read_32(pll2[i][0]);
+ }
+
+ /* PLL3 */
+ for (i = 0U; i < ARRAY_SIZE(pll3); i++) {
+ pll3[i][1] = mmio_read_32(pll3[i][0]);
+ }
+
+ /* CGC1 others */
+ for (i = 0U; i < ARRAY_SIZE(cgc1); i++) {
+ cgc1[i][1] = mmio_read_32(cgc1[i][0]);
+ }
+}
+
+void cgc1_restore(void)
+{
+ unsigned int i;
+
+ /* PLL2 */
+ for (i = 0U; i < ARRAY_SIZE(pll2); i++) {
+ mmio_write_32(pll2[i][0], pll2[i][1]);
+ }
+ /* wait for PLL2 lock */
+ while (!(mmio_read_32(pll2[4][0]) & BIT(24))) {
+ ;
+ }
+
+ /* PLL3 */
+ for (i = 0U; i < 9U; i++) {
+ mmio_write_32(pll3[i][0], pll3[i][1]);
+ }
+
+ /* wait for PLL3 lock */
+ while (!(mmio_read_32(pll3[4][0]) & BIT(24))) {
+ ;
+ }
+
+ /* restore the PFDs */
+ mmio_write_32(pll3[9][0], pll3[9][1] & ~(BIT(31) | BIT(23) | BIT(15) | BIT(7)));
+ mmio_write_32(pll3[9][0], pll3[9][1]);
+
+	/* wait until the PFDs are stable; only the enabled PFDs need to be checked */
+ while (!(mmio_read_32(pll3[9][0]) & PFD_VALID_MASK)) {
+ ;
+ }
+
+ /* CGC1 others */
+ for (i = 0U; i < ARRAY_SIZE(cgc1); i++) {
+ mmio_write_32(cgc1[i][0], cgc1[i][1]);
+ }
+}
+
+void tpm5_save(void)
+{
+ tpm5[0] = mmio_read_32(IMX_TPM5_BASE + 0x10);
+ tpm5[1] = mmio_read_32(IMX_TPM5_BASE + 0x18);
+ tpm5[2] = mmio_read_32(IMX_TPM5_BASE + 0x20);
+}
+
+void tpm5_restore(void)
+{
+ mmio_write_32(IMX_TPM5_BASE + 0x10, tpm5[0]);
+ mmio_write_32(IMX_TPM5_BASE + 0x18, tpm5[1]);
+ mmio_write_32(IMX_TPM5_BASE + 0x20, tpm5[2]);
+}
+
+void wdog3_save(void)
+{
+ /* enable wdog3 clock */
+ mmio_write_32(IMX_PCC3_BASE + 0xa8, 0xd2800000);
+
+	/* save the CS & TOVAL registers */
+ wdog3[0] = mmio_read_32(IMX_WDOG3_BASE);
+ wdog3[1] = mmio_read_32(IMX_WDOG3_BASE + 0x8);
+}
+
+void wdog3_restore(void)
+{
+ /* enable wdog3 clock */
+ mmio_write_32(IMX_PCC3_BASE + 0xa8, 0xd2800000);
+
+ /* reconfig the CS */
+ mmio_write_32(IMX_WDOG3_BASE, wdog3[0]);
+	/* set the timeout value */
+ mmio_write_32(IMX_WDOG3_BASE + 0x8, wdog3[1]);
+
+ /* wait for the lock status */
+ while ((mmio_read_32(IMX_WDOG3_BASE) & BIT(11))) {
+ ;
+ }
+
+ /* wait for the config done */
+ while (!(mmio_read_32(IMX_WDOG3_BASE) & BIT(10))) {
+ ;
+ }
+}
+
+static uint32_t lpuart_regs[4];
+#define LPUART_BAUD 0x10
+#define LPUART_CTRL 0x18
+#define LPUART_FIFO 0x28
+#define LPUART_WATER 0x2c
+
+void lpuart_save(void)
+{
+ lpuart_regs[0] = mmio_read_32(IMX_LPUART5_BASE + LPUART_BAUD);
+ lpuart_regs[1] = mmio_read_32(IMX_LPUART5_BASE + LPUART_FIFO);
+ lpuart_regs[2] = mmio_read_32(IMX_LPUART5_BASE + LPUART_WATER);
+ lpuart_regs[3] = mmio_read_32(IMX_LPUART5_BASE + LPUART_CTRL);
+}
+
+void lpuart_restore(void)
+{
+ mmio_write_32(IMX_LPUART5_BASE + LPUART_BAUD, lpuart_regs[0]);
+ mmio_write_32(IMX_LPUART5_BASE + LPUART_FIFO, lpuart_regs[1]);
+ mmio_write_32(IMX_LPUART5_BASE + LPUART_WATER, lpuart_regs[2]);
+ mmio_write_32(IMX_LPUART5_BASE + LPUART_CTRL, lpuart_regs[3]);
+}
+
+bool is_lpav_owned_by_apd(void)
+{
+ return (mmio_read_32(0x2802b044) & BIT(7)) ? true : false;
+}
+
+void lpav_ctx_save(void)
+{
+ unsigned int i;
+ uint32_t val;
+
+ /* CGC2 save */
+ for (i = 0U; i < ARRAY_SIZE(cgc2); i++) {
+ cgc2[i][1] = mmio_read_32(cgc2[i][0]);
+ }
+
+ /* PLL4 */
+ for (i = 0U; i < ARRAY_SIZE(pll4); i++) {
+ pll4[i][1] = mmio_read_32(pll4[i][0]);
+ }
+
+ /* PCC5 save */
+ for (i = 0U; i < ARRAY_SIZE(pcc5_0); i++) {
+ val = mmio_read_32(IMX_PCC5_BASE + i * 4);
+ if (val & PCC_PR) {
+ pcc5_0[i] = val;
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(pcc5_1); i++) {
+ val = mmio_read_32(pcc5_1[i][0]);
+ if (val & PCC_PR) {
+ pcc5_1[i][1] = val;
+ }
+ }
+
+ /* LPAV SIM save */
+ for (i = 0U; i < ARRAY_SIZE(lpav_sim); i++) {
+ lpav_sim[i][1] = mmio_read_32(lpav_sim[i][0]);
+ }
+
+ /* Save GPIO port D */
+ gpio_save(&lpav_gpio_ctx, LPAV_GPIO_CTRL_NUM);
+
+ /* put DDR into retention */
+ dram_enter_retention();
+}
+
+void lpav_ctx_restore(void)
+{
+ unsigned int i;
+
+ /* PLL4 */
+ for (i = 0U; i < 9U; i++) {
+ mmio_write_32(pll4[i][0], pll4[i][1]);
+ }
+
+ /* wait for PLL4 lock */
+ while (!(mmio_read_32(pll4[8][0]) & BIT(24))) {
+ ;
+ }
+
+ /* restore the PLL4 PFDs */
+ mmio_write_32(pll4[9][0], pll4[9][1] & ~(BIT(31) | BIT(23) | BIT(15) | BIT(7)));
+ mmio_write_32(pll4[9][0], pll4[9][1]);
+
+	/* wait until the PFDs are stable */
+ while (!(mmio_read_32(pll4[9][0]) & PFD_VALID_MASK)) {
+ ;
+ }
+
+ /* CGC2 restore */
+ for (i = 0U; i < ARRAY_SIZE(cgc2); i++) {
+ mmio_write_32(cgc2[i][0], cgc2[i][1]);
+ }
+
+ /* PCC5 restore */
+ for (i = 0U; i < ARRAY_SIZE(pcc5_0); i++) {
+ if (pcc5_0[i] & PCC_PR) {
+ mmio_write_32(IMX_PCC5_BASE + i * 4, pcc5_0[i]);
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(pcc5_1); i++) {
+ if (pcc5_1[i][1] & PCC_PR) {
+ mmio_write_32(pcc5_1[i][0], pcc5_1[i][1]);
+ }
+ }
+
+ /* LPAV_SIM */
+ for (i = 0U; i < ARRAY_SIZE(lpav_sim); i++) {
+ mmio_write_32(lpav_sim[i][0], lpav_sim[i][1]);
+ }
+
+ gpio_restore(&lpav_gpio_ctx, LPAV_GPIO_CTRL_NUM);
+ /* DDR retention exit */
+ dram_exit_retention();
+}
+
+void imx_apd_ctx_save(unsigned int proc_num)
+{
+ unsigned int i;
+ uint32_t val;
+
+ /* enable LPUART5's clock by default */
+ mmio_setbits_32(IMX_PCC3_BASE + 0xe8, BIT(30));
+
+ /* save the gic config */
+ plat_gic_save(proc_num, &imx_gicv3_ctx);
+
+ cmc1_pmprot = mmio_read_32(IMX_CMC1_BASE + 0x18);
+ cmc1_srie = mmio_read_32(IMX_CMC1_BASE + 0x8c);
+
+ /* save the PCC3 */
+ for (i = 0U; i < ARRAY_SIZE(pcc3); i++) {
+		/* save the PCC register only if it is present */
+ val = mmio_read_32(IMX_PCC3_BASE + i * 4);
+ if (val & PCC_PR) {
+ pcc3[i] = val;
+ }
+ }
+
+ /* save the PCC4 */
+ for (i = 0U; i < ARRAY_SIZE(pcc4); i++) {
+		/* save the PCC register only if it is present */
+ val = mmio_read_32(IMX_PCC4_BASE + i * 4);
+ if (val & PCC_PR) {
+ pcc4[i] = val;
+ }
+ }
+
+ /* save the CGC1 */
+ cgc1_save();
+
+ wdog3_save();
+
+ gpio_save(apd_gpio_ctx, APD_GPIO_CTRL_NUM);
+
+ iomuxc_save();
+
+ tpm5_save();
+
+ lpuart_save();
+
+ /*
+	 * save the LPAV ctx & put the DDR into retention
+	 * if the LPAV master is assigned to the APD domain.
+ */
+ if (is_lpav_owned_by_apd()) {
+ lpav_ctx_save();
+ }
+}
+
+void xrdc_reinit(void)
+{
+ xrdc_apply_apd_config();
+ xrdc_apply_lpav_config();
+
+ xrdc_enable();
+}
+
+void s400_release_caam(void)
+{
+ uint32_t msg, resp;
+
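+	/*
+	 * Release request sequence over the S400 messaging unit: write the two
+	 * command words to the transmit registers, poll the receive status
+	 * register until both response words are pending, then read them back.
+	 */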
+ mmio_write_32(S400_MU_TRx(0), 0x17d70206);
+ mmio_write_32(S400_MU_TRx(1), 0x7);
+
+ do {
+ resp = mmio_read_32(S400_MU_RSR);
+ } while ((resp & 0x3) != 0x3);
+
+ msg = mmio_read_32(S400_MU_RRx(0));
+ resp = mmio_read_32(S400_MU_RRx(1));
+
+ VERBOSE("resp %x; %x", msg, resp);
+}
+
+void imx_apd_ctx_restore(unsigned int proc_num)
+{
+ unsigned int i;
+
+	/* restore the CGC1 */
+ cgc1_restore();
+
+ for (i = 0U; i < ARRAY_SIZE(pcc3); i++) {
+		/* restore the PCC register only if it was present */
+ if (pcc3[i] & PCC_PR) {
+ mmio_write_32(IMX_PCC3_BASE + i * 4, pcc3[i]);
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(pcc4); i++) {
+ if (pcc4[i] & PCC_PR) {
+ mmio_write_32(IMX_PCC4_BASE + i * 4, pcc4[i]);
+ }
+ }
+
+ wdog3_restore();
+
+ iomuxc_restore();
+
+ tpm5_restore();
+
+ xrdc_reinit();
+
+ /* Restore GPIO after xrdc_reinit, otherwise MSCs are invalid */
+ gpio_restore(apd_gpio_ctx, APD_GPIO_CTRL_NUM);
+
+ /* restore the gic config */
+ plat_gic_restore(proc_num, &imx_gicv3_ctx);
+
+ mmio_write_32(IMX_CMC1_BASE + 0x18, cmc1_pmprot);
+ mmio_write_32(IMX_CMC1_BASE + 0x8c, cmc1_srie);
+
+ /* enable LPUART5's clock by default */
+ mmio_setbits_32(IMX_PCC3_BASE + 0xe8, BIT(30));
+
+ /* restore the console lpuart */
+ lpuart_restore();
+
+ /* FIXME: make uart work for ATF */
+ mmio_write_32(IMX_LPUART_BASE + 0x18, 0xc0000);
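+	/* the 0xc0000 written above presumably sets the LPUART CTRL TE/RE (transmit/receive enable) bits */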
+
+ /* Allow M core to reset A core */
+ mmio_clrbits_32(IMX_MU0B_BASE + 0x10, BIT(2));
+ /*
+	 * Ask the S400 to release the CAAM to the APD, as it is owned by the S400
+ */
+ s400_release_caam();
+
+ /* re-init the caam */
+ imx8ulp_caam_init();
+
+ /*
+	 * Ack the uPower; this step appears to be necessary, otherwise uPower
+	 * cannot respond to the next API service call. Keep this just before
+	 * the DDR retention exit, because the DRAM retention exit flow needs
+	 * to communicate with uPower.
+ */
+ upower_wait_resp();
+
+ /*
+	 * restore the LPAV ctx & bring the DDR out of retention
+	 * if the LPAV master is assigned to the APD domain.
+ */
+ if (is_lpav_owned_by_apd()) {
+ lpav_ctx_restore();
+ }
+}
+
+#define DGO_CTRL1 U(0xc)
+#define USB_WAKEUP U(0x44)
+#define USB1_PHY_DPD_WAKEUP_EN BIT_32(5)
+#define USB0_PHY_DPD_WAKEUP_EN BIT_32(4)
+#define USB1_PHY_WAKEUP_ISO_DISABLE BIT_32(1)
+#define USB0_PHY_WAKEUP_ISO_DISABLE BIT_32(0)
+
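+/*
+ * The sequences below use the SIM DGO_CTRL1 update handshake: set BIT(0) to
+ * trigger the register update, wait for the BIT(1) acknowledge, then clear
+ * BIT(0) and write BIT(1) back to clear the acknowledge flag.
+ */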
+void usb_wakeup_enable(bool enable)
+{
+ if (enable) {
+ mmio_setbits_32(IMX_SIM1_BASE + USB_WAKEUP,
+ USB1_PHY_WAKEUP_ISO_DISABLE | USB0_PHY_WAKEUP_ISO_DISABLE);
+ mmio_setbits_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ while (!(mmio_read_32(IMX_SIM1_BASE + DGO_CTRL1) & BIT(1))) {
+ ;
+ }
+
+ mmio_clrbits_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ mmio_write_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(1));
+
+ /* Need to delay for a while to make sure the wakeup logic can work */
+ udelay(500);
+
+ mmio_setbits_32(IMX_SIM1_BASE + USB_WAKEUP,
+ USB1_PHY_DPD_WAKEUP_EN | USB0_PHY_DPD_WAKEUP_EN);
+ mmio_setbits_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ while (!(mmio_read_32(IMX_SIM1_BASE + DGO_CTRL1) & BIT(1))) {
+ ;
+ }
+
+ mmio_clrbits_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ mmio_write_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(1));
+ } else {
+ /*
+ * USBx_PHY_DPD_WAKEUP_EN should be cleared before USB0_PHY_WAKEUP_ISO_DISABLE
+		 * to provide the correct wake-up functionality.
+ */
+ mmio_write_32(IMX_SIM1_BASE + USB_WAKEUP, USB1_PHY_WAKEUP_ISO_DISABLE |
+ USB0_PHY_WAKEUP_ISO_DISABLE);
+ mmio_write_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ while (!(mmio_read_32(IMX_SIM1_BASE + DGO_CTRL1) & BIT(1))) {
+ ;
+ }
+
+ mmio_clrbits_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(0));
+ mmio_write_32(IMX_SIM1_BASE + DGO_CTRL1, BIT(1));
+ }
+}
diff --git a/plat/imx/imx8ulp/dram.c b/plat/imx/imx8ulp/dram.c
new file mode 100644
index 0000000000..00a5220821
--- /dev/null
+++ b/plat/imx/imx8ulp/dram.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <bl31/interrupt_mgmt.h>
+#include <common/runtime_svc.h>
+#include <lib/mmio.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+
+#include <platform_def.h>
+
+#include <dram.h>
+#include <upower_api.h>
+
+#define PHY_FREQ_SEL_INDEX(x) ((x) << 16)
+#define PHY_FREQ_MULTICAST_EN(x) ((x) << 8)
+#define DENALI_PHY_1537 U(0x5804)
+
+#define IMX_DDRC_BASE U(0x2E060000)
+#define SAVED_DRAM_DATA_BASE U(0x20055000)
+#define DENALI_CTL_143 U(0x23C)
+#define DENALI_CTL_144 U(0x240)
+#define DENALI_CTL_146 U(0x248)
+#define LP_STATE_CS_IDLE U(0x404000)
+#define LP_STATE_CS_PD_CG U(0x4F4F00)
+#define LPI_WAKEUP_EN_SHIFT U(8)
+#define IMX_LPAV_SIM_BASE 0x2DA50000
+#define LPDDR_CTRL 0x14
+#define LPDDR_AUTO_LP_MODE_DISABLE BIT(24)
+#define SOC_LP_CMD_SHIFT U(15)
+#define LPDDR_CTRL2 0x18
+#define LPDDR_EN_CLKGATE (0x1<<17)
+#define LPDDR_MAX_CLKDIV_EN (0x1 << 16)
+#define LP_AUTO_ENTRY_EN 0x4
+#define LP_AUTO_EXIT_EN 0xF
+
+#define DENALI_CTL_00 U(0x0)
+#define DENALI_CTL_23 U(0x5c)
+#define DFIBUS_FREQ_INIT_SHIFT U(24)
+#define TSREF2PHYMSTR_SHIFT U(8)
+#define TSREF2PHYMSTR_MASK GENMASK(13, 8)
+
+#define DENALI_CTL_24 U(0x60)
+#define DENALI_CTL_25 U(0x64)
+
+#define DENALI_CTL_93 U(0x174)
+#define PWRUP_SREFRESH_EXIT BIT(0)
+
+#define DENALI_CTL_127 U(0x1fc)
+#define PHYMSTR_TRAIN_AFTER_INIT_COMPLETE BIT(16)
+
+#define DENALI_CTL_147 U(0x24c)
+#define DENALI_CTL_153 U(0x264)
+#define PCPCS_PD_EN BIT(8)
+
+#define DENALI_CTL_249 U(0x3E4)
+#define DENALI_CTL_266 U(0x428)
+
+#define DENALI_PHY_1547 U(0x582c)
+#define PHY_LP4_BOOT_DISABLE BIT(8)
+
+#define DENALI_PHY_1559 U(0x585c)
+#define DENALI_PHY_1590 U(0x58D8)
+
+#define DENALI_PI_00 U(0x2000)
+#define DENALI_PI_04 U(0x2010)
+#define DENALI_PI_52 U(0x20D0)
+#define DENALI_PI_26 U(0x2068)
+#define DENALI_PI_33 U(0x2084)
+#define DENALI_PI_65 U(0x2104)
+#define DENALI_PI_77 U(0x2134)
+#define DENALI_PI_134 U(0x2218)
+#define DENALI_PI_131 U(0x220C)
+#define DENALI_PI_132 U(0x2210)
+#define DENALI_PI_137 U(0x2224)
+#define DENALI_PI_174 U(0x22B8)
+#define DENALI_PI_175 U(0x22BC)
+#define DENALI_PI_181 U(0x22D4)
+#define DENALI_PI_182 U(0x22D8)
+#define DENALI_PI_191 U(0x22FC)
+#define DENALI_PI_192 U(0x2300)
+#define DENALI_PI_212 U(0x2350)
+#define DENALI_PI_214 U(0x2358)
+#define DENALI_PI_217 U(0x2364)
+
+#define LPDDR3_TYPE U(0x7)
+#define LPDDR4_TYPE U(0xB)
+
+extern void upower_wait_resp(void);
+
+struct dram_cfg_param {
+ uint32_t reg;
+ uint32_t val;
+};
+
+struct dram_timing_info {
+ /* ddr controller config */
+ struct dram_cfg_param *ctl_cfg;
+ unsigned int ctl_cfg_num;
+ /* pi config */
+ struct dram_cfg_param *pi_cfg;
+ unsigned int pi_cfg_num;
+ /* phy freq1 config */
+ struct dram_cfg_param *phy_f1_cfg;
+ unsigned int phy_f1_cfg_num;
+ /* phy freq2 config */
+ struct dram_cfg_param *phy_f2_cfg;
+ unsigned int phy_f2_cfg_num;
+ /* automatic low power config */
+ struct dram_cfg_param *auto_lp_cfg;
+ unsigned int auto_lp_cfg_num;
+ /* initialized drate table */
+ unsigned int fsp_table[3];
+};
+
+#define CTL_NUM U(680)
+#define PI_NUM U(298)
+#define PHY_NUM U(1654)
+#define PHY_DIFF_NUM U(49)
+#define AUTO_LP_NUM U(3)
+struct dram_cfg {
+ uint32_t ctl_cfg[CTL_NUM];
+ uint32_t pi_cfg[PI_NUM];
+ uint32_t phy_full[PHY_NUM];
+ uint32_t phy_diff[PHY_DIFF_NUM];
+ uint32_t auto_lp_cfg[AUTO_LP_NUM];
+};
+
+struct dram_timing_info *info;
+struct dram_cfg *dram_timing_cfg;
+
+/* mark if dram cfg is already saved */
+static bool dram_cfg_saved;
+static bool dram_auto_lp_true;
+static uint32_t dram_class, dram_ctl_143;
+
+/* indices of the PHY registers whose values differ between frequency setpoints */
+uint32_t freq_specific_reg_array[PHY_DIFF_NUM] = {
+90, 92, 93, 96, 97, 100, 101, 102, 103, 104, 114,
+346, 348, 349, 352, 353, 356, 357, 358, 359, 360,
+370, 602, 604, 605, 608, 609, 612, 613, 614, 615,
+616, 626, 858, 860, 861, 864, 865, 868, 869, 870,
+871, 872, 882, 1063, 1319, 1566, 1624, 1625
+};
+
+/* lock used for DDR DVFS */
+spinlock_t dfs_lock;
+static volatile uint32_t core_count;
+static volatile bool in_progress;
+static volatile bool sys_dvfs;
+static int num_fsp;
+
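+/*
+ * Reload the controller (CTL), PI and PHY register images saved by
+ * dram_enter_retention(). For LPDDR4, the frequency-specific PHY registers
+ * are additionally restored with multicast disabled before multicast mode is
+ * re-enabled.
+ */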
+static void ddr_init(void)
+{
+ unsigned int i;
+
+ /* restore the ddr ctl config */
+ for (i = 0U; i < CTL_NUM; i++) {
+ mmio_write_32(IMX_DDRC_BASE + i * 4, dram_timing_cfg->ctl_cfg[i]);
+ }
+
+ /* load the PI registers */
+ for (i = 0U; i < PI_NUM; i++) {
+ mmio_write_32(IMX_DDRC_BASE + 0x2000 + i * 4, dram_timing_cfg->pi_cfg[i]);
+ }
+
+	/* restore all PHY registers for all the FSPs */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1537, 0x100);
+ /* restore all the phy configs */
+ for (i = 0U; i < PHY_NUM; i++) {
+ /* skip the reserved registers space */
+ if (i >= 121U && i <= 255U) {
+ continue;
+ }
+ if (i >= 377U && i <= 511U) {
+ continue;
+ }
+ if (i >= 633U && i <= 767U) {
+ continue;
+ }
+ if (i >= 889U && i <= 1023U) {
+ continue;
+ }
+ if (i >= 1065U && i <= 1279U) {
+ continue;
+ }
+ if (i >= 1321U && i <= 1535U) {
+ continue;
+ }
+ mmio_write_32(IMX_DDRC_BASE + 0x4000 + i * 4, dram_timing_cfg->phy_full[i]);
+ }
+
+ if (dram_class == LPDDR4_TYPE) {
+ /* restore only the diff. */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1537, 0x0);
+ for (i = 0U; i < PHY_DIFF_NUM; i++) {
+ mmio_write_32(IMX_DDRC_BASE + 0x4000 + freq_specific_reg_array[i] * 4,
+ dram_timing_cfg->phy_diff[i]);
+ }
+ }
+
+ /* Re-enable MULTICAST mode */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1537, PHY_FREQ_MULTICAST_EN(1));
+}
+
+void dram_lp_auto_disable(void)
+{
+ uint32_t lp_auto_en;
+
+ dram_timing_cfg = (struct dram_cfg *)(SAVED_DRAM_DATA_BASE +
+ sizeof(struct dram_timing_info));
+ lp_auto_en = (mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_146) & (LP_AUTO_ENTRY_EN << 24));
+ /* Save initial config */
+ dram_ctl_143 = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_143);
+
+ if (lp_auto_en && !dram_auto_lp_true) {
+ /* 0.a Save DDRC auto low-power mode parameter */
+ dram_timing_cfg->auto_lp_cfg[0] = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_144);
+ dram_timing_cfg->auto_lp_cfg[1] = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_147);
+ dram_timing_cfg->auto_lp_cfg[2] = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_146);
+ /* Set LPI_SRPD_LONG_MCCLK_GATE_WAKEUP_F2 to Maximum */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_CTL_143, 0xF << 24);
+ /* 0.b Disable DDRC auto low-power mode interface */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_CTL_146, LP_AUTO_ENTRY_EN << 24);
+ /* 0.c Read any location to get DRAM out of Self-refresh */
+ mmio_read_32(DEVICE2_BASE);
+ /* 0.d Confirm DRAM is out of Self-refresh */
+ while ((mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_146) &
+ LP_STATE_CS_PD_CG) != LP_STATE_CS_IDLE) {
+ ;
+ }
+ /* 0.e Disable DDRC auto low-power exit */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_CTL_147, LP_AUTO_EXIT_EN);
+ /* dram low power mode flag */
+ dram_auto_lp_true = true;
+ }
+}
+
+void dram_lp_auto_enable(void)
+{
+ /* Switch back to Auto Low-power mode */
+ if (dram_auto_lp_true) {
+ /* 12.a Confirm DRAM is out of Self-refresh */
+ while ((mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_146) &
+ LP_STATE_CS_PD_CG) != LP_STATE_CS_IDLE) {
+ ;
+ }
+ /* 12.b Enable DDRC auto low-power exit */
+ /*
+ * 12.c TBC! : Set DENALI_CTL_144 [LPI_CTRL_REQ_EN[24]] and
+ * [DFI_LP_VERSION[16]] back to default settings = 1b'1.
+ */
+ /*
+ * 12.d Reconfigure DENALI_CTL_144 [LPI_WAKEUP_EN[5:0]] bit
+ * LPI_WAKEUP_EN[3] = 1b'1.
+ */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_144, dram_timing_cfg->auto_lp_cfg[0]);
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_147, dram_timing_cfg->auto_lp_cfg[1]);
+ /* 12.e Re-enable DDRC auto low-power mode interface */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_146, dram_timing_cfg->auto_lp_cfg[2]);
+ /* restore ctl config */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_143, dram_ctl_143);
+ /* dram low power mode flag */
+ dram_auto_lp_true = false;
+ }
+}
+
+void dram_enter_self_refresh(void)
+{
+ /* disable auto low power interface */
+ dram_lp_auto_disable();
+ /* 1. config the PCC_LPDDR4[SSADO] to 2b'11 for ACK domain 0/1's STOP */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, 0x2 << 22);
+ /* 1.a Clock gate PCC_LPDDR4[CGC] and no software reset PCC_LPDDR4[SWRST] */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, (BIT(30) | BIT(28)));
+
+ /*
+ * 2. Make sure the DENALI_CTL_144[LPI_WAKEUP_EN[5:0]] has the bit
+ * LPI_WAKEUP_EN[3] = 1b'1. This enables the option 'self-refresh
+ * long with mem and ctlr clk gating or self-refresh power-down long
+ * with mem and ctlr clk gating'
+ */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_CTL_144, BIT(3) << LPI_WAKEUP_EN_SHIFT);
+ /* TODO: Needed ? 2.a DENALI_CTL_144[LPI_TIMER_WAKEUP_F2] */
+ //mmio_setbits_32(IMX_DDRC_BASE + DENALI_CTL_144, BIT(0));
+
+ /*
+	 * 3a. Config SIM_LPAV LPDDR_CTRL[LPDDR_AUTO_LP_MODE_DISABLE] to 1b'0 (enable
+	 * the logic that automatically handles low-power entry/exit; this is the
+	 * recommended option over handling it through software).
+	 * 3b. Config the SIM_LPAV LPDDR_CTRL[SOC_LP_CMD] to 6b'101001 (encoding for
+	 * self-refresh with both DDR controller and DRAM clock gated; this is mandatory
+	 * since the LPDDR logic will be power gated).
+ */
+ mmio_clrbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL, LPDDR_AUTO_LP_MODE_DISABLE);
+ mmio_clrsetbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL,
+ 0x3f << SOC_LP_CMD_SHIFT, 0x29 << SOC_LP_CMD_SHIFT);
+ /* 3.c clock gate ddr controller */
+ mmio_setbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL2, LPDDR_EN_CLKGATE);
+ /* 3.d lpddr max clk div en */
+ mmio_clrbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL2, LPDDR_MAX_CLKDIV_EN);
+}
+
+void dram_exit_self_refresh(void)
+{
+ dram_lp_auto_enable();
+}
+
+void dram_enter_retention(void)
+{
+ unsigned int i;
+
+ dram_lp_auto_disable();
+
+ /* 1. config the PCC_LPDDR4[SSADO] to 2b'11 for ACK domain 0/1's STOP */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, 0x2 << 22);
+
+ /*
+ * 2. Make sure the DENALI_CTL_144[LPI_WAKEUP_EN[5:0]] has the bit
+ * LPI_WAKEUP_EN[3] = 1b'1. This enables the option 'self-refresh
+ * long with mem and ctlr clk gating or self-refresh power-down
+ * long with mem and ctlr clk gating'
+ */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_CTL_144, BIT(3) << LPI_WAKEUP_EN_SHIFT);
+
+ /*
+ * 3a. Config SIM_LPAV LPDDR_CTRL[LPDDR_AUTO_LP_MODE_DISABLE] to 1b'0(enable
+	 * 3a. Config SIM_LPAV LPDDR_CTRL[LPDDR_AUTO_LP_MODE_DISABLE] to 1b'0 (enable
+	 * the logic that automatically handles low-power entry/exit; this is the
+	 * recommended option over handling it through software).
+	 * 3b. Config the SIM_LPAV LPDDR_CTRL[SOC_LP_CMD] to 6b'101001 (encoding for
+	 * self-refresh with both DDR controller and DRAM clock gated; this is mandatory
+	 * since the LPDDR logic will be power gated).
+ mmio_clrbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL, LPDDR_AUTO_LP_MODE_DISABLE);
+ mmio_clrsetbits_32(IMX_LPAV_SIM_BASE + LPDDR_CTRL,
+ 0x3f << SOC_LP_CMD_SHIFT, 0x29 << SOC_LP_CMD_SHIFT);
+
+ /* Save DDR Controller & PHY config.
+ * Set PHY_FREQ_SEL_MULTICAST_EN=0 & PHY_FREQ_SEL_INDEX=1. Read and store all
+ * the PHY registers for F2 into phy_f1_cfg, then read/store the diff between
+ * F1 & F2 into phy_f2_cfg.
+ */
+ if (!dram_cfg_saved) {
+ info = (struct dram_timing_info *)SAVED_DRAM_DATA_BASE;
+ dram_timing_cfg = (struct dram_cfg *)(SAVED_DRAM_DATA_BASE +
+ sizeof(struct dram_timing_info));
+
+ /* get the dram type */
+ dram_class = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_00);
+ dram_class = (dram_class >> 8) & 0xf;
+
+ /* save the ctl registers */
+ for (i = 0U; i < CTL_NUM; i++) {
+ dram_timing_cfg->ctl_cfg[i] = mmio_read_32(IMX_DDRC_BASE + i * 4);
+ }
+ dram_timing_cfg->ctl_cfg[0] = dram_timing_cfg->ctl_cfg[0] & 0xFFFFFFFE;
+
+ /* save the PI registers */
+ for (i = 0U; i < PI_NUM; i++) {
+ dram_timing_cfg->pi_cfg[i] = mmio_read_32(IMX_DDRC_BASE + 0x2000 + i * 4);
+ }
+ dram_timing_cfg->pi_cfg[0] = dram_timing_cfg->pi_cfg[0] & 0xFFFFFFFE;
+
+ /*
+		 * Read and store all PHY registers. The full array is a
+		 * complete copy covering every setpoint.
+ */
+ if (dram_class == LPDDR4_TYPE) {
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1537, 0x10000);
+ for (i = 0U; i < PHY_NUM; i++) {
+				/* Make sure MULTICAST is enabled */
+ if (i == 1537U) {
+ dram_timing_cfg->phy_full[i] = 0x100;
+ } else {
+ dram_timing_cfg->phy_full[i] = mmio_read_32(IMX_DDRC_BASE + 0x4000 + i * 4);
+ }
+ }
+
+ /*
+ * set PHY_FREQ_SEL_MULTICAST_EN=0 & PHY_FREQ_SEL_INDEX=0.
+ * Read and store only the diff.
+ */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1537, 0x0);
+			/* save only the frequency-based diff config to save memory */
+ for (i = 0U; i < PHY_DIFF_NUM; i++) {
+ dram_timing_cfg->phy_diff[i] = mmio_read_32(IMX_DDRC_BASE + 0x4000 +
+ freq_specific_reg_array[i] * 4);
+ }
+ } else {
+			/* LPDDR3: only F1 needs to be saved */
+ for (i = 0U; i < info->phy_f1_cfg_num; i++) {
+ info->phy_f1_cfg[i].val = mmio_read_32(info->phy_f1_cfg[i].reg);
+ }
+ }
+
+ dram_cfg_saved = true;
+ }
+}
+
+void dram_exit_retention(void)
+{
+ uint32_t val;
+
+ /* 1. Config the LPAV PLL4 and DDR clock for the desired LPDDR operating frequency. */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, BIT(30));
+
+ /* 2. Write PCC5.PCC_LPDDR4[SWRST] to 1b'1 to release LPDDR from reset. */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, BIT(28));
+
+ /* 3. Reload the LPDDR CTL/PI/PHY register */
+ ddr_init();
+
+ if (dram_class == LPDDR4_TYPE) {
+ /* 4a. FIXME Set PHY_SET_DFI_INPUT_N parameters to 4'h1. LPDDR4 only */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1559, 0x01010101);
+
+ /*
+ * 4b. CTL PWRUP_SREFRESH_EXIT=1'b0 for disabling self refresh exit
+ * from controller.
+ */
+ /*
+ * 4c. PI_PWRUP_SELF_REF_EXIT=1, PI_MC_PWRUP_SELF_REF_EXIT=0 for enabling
+ * self refresh exit from PI
+ */
+ /* 4c. PI_INT_LVL_EN=0 to skip Initialization trainings. */
+ /*
+ * 4d. PI_WRLVL_EN_F0/1/2= PI_CALVL_EN_F0/1/2= PI_RDLVL_EN_F0/1/2=
+ * PI_RDLVL_GATE_EN_F0/1/2= PI_WDQLVL_EN_F0/1/2=0x2.
+ * Enable non initialization trainings.
+ */
+ /* 4e. PI_PWRUP_SREFRESH_EXIT_CS=0xF */
+ /* 4f. PI_DLL_RESET=0x1 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_137, 0x1);
+ /* PI_PWRUP_SELF_REF_EXIT = 1 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_132, 0x01000000);
+ /* PI_MC_PWRUP_SELF_REF_EXIT = 0 */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_PI_132, BIT(16));
+ /* PI_INT_LVL_EN = 0 */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_PI_04, BIT(0));
+ /* PI_WRLVL_EN_F0 = 3, PI_WRLVL_EN_F1 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_174, 0x03030000);
+ /* PI_WRLVL_EN_F2 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_175, 0x03);
+ /* PI_CALVL_EN_F0 = 3, PI_CALVL_EN_F1 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_191, 0x03030000);
+ /* PI_CALVL_EN_F2 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_192, 0x03);
+ /* PI_WDQLVL_EN_F0 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_212, 0x300);
+ /* PI_WDQLVL_EN_F1 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_214, 0x03000000);
+ /* PI_WDQLVL_EN_F2 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_217, 0x300);
+ /* PI_EDLVL_EN_F0 = 3, PI_EDLVL_GATE_EN_F0 = 3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_181, 0x03030000);
+ /*
+ * PI_RDLVL_EN_F1 = 3, PI_RDLVL_GATE_EN_F1 = 3,
+ * PI_RDLVL_EN_F2 = 3, PI_RDLVL_GATE_EN_F2 = 3
+ */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_182, 0x03030303);
+ /* PI_PWRUP_SREFRESH_EXIT_CS = 0xF */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_134, 0x000F0000);
+ } else {
+ /* PI_DLL_RESET=1 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_137, 0x1);
+ /* PI_PWRUP_SELF_REF_EXIT=1 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_132, 0x01000000);
+ /* PI_MC_PWRUP_SELF_REF_EXIT=0 */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_PI_132, BIT(16));
+ /* PI_INT_LVL_EN=0 */
+ mmio_clrbits_32(IMX_DDRC_BASE + DENALI_PI_04, BIT(0));
+ /* PI_WRLVL_EN_F0=3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_174, 0x00030000);
+ /* PI_CALVL_EN_F0=3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_191, 0x00030000);
+ /* PI_RDLVL_EN_F0=3,PI_RDLVL_GATE_EN_F0=3 */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_181, 0x03030000);
+ /* PI_PWRUP_SREFRESH_EXIT_CS=0xF */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_134, 0x000F0000);
+ }
+
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_144, 0x00002D00);
+
+ /* Force in-order AXI read data */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_144, 0x1);
+
+ /*
+ * Disable special R/W group switches so that R/W group placement
+ * is always at END of R/W group.
+ */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_249, 0x0);
+
+ /* Reduce time for IO pad calibration */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PHY_1590, 0x01000000);
+
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_25, 0x00020100);
+
+ /* PD disable */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_153, 0x04040000);
+ /*
+ * 5. Disable automatic LP entry and PCPCS modes LP_AUTO_ENTRY_EN
+ * to 1b'0, PCPCS_PD_EN to 1b'0
+ */
+
+ upwr_xcp_set_ddr_retention(APD_DOMAIN, 0, NULL);
+ upower_wait_resp();
+
+ if (dram_class == LPDDR4_TYPE) {
+ /* 7. Write PI START parameter to 1'b1 */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PI_00, 0x00000b01);
+
+ /* 8. Write CTL START parameter to 1'b1 */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_00, 0x00000b01);
+ } else {
+ /* 7. Write PI START parameter to 1'b1 */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_PI_00, 0x00000701);
+
+ /* 8. Write CTL START parameter to 1'b1 */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_00, 0x00000701);
+ }
+
+ /* 9. DENALI_CTL_266: Wait for INT_STATUS_INIT=0x2 */
+ do {
+ val = (mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_266) >> 8) & 0xFF;
+ } while (val != 0x2);
+
+ /*
+ * 10. Run SW trainings by setting PI_CALVL_REQ,PI_WRLVL_REQ,PI_RDLVL_GATE_REQ,
+ * PI_RDLVL_REQ,PI_WDQLVL_REQ(NA for LPDDR3) in same order.
+ */
+ if (dram_class == LPDDR4_TYPE) {
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_52, 0x10000); /* CALVL */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_26, 0x100); /* WRLVL */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_33, 0x10000); /* RDGATE */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_33, 0x100); /* RDQLVL */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_65, 0x10000); /* WDQLVL */
+
+ /* 11. Wait for trainings to get complete by polling PI_INT_STATUS */
+ while ((mmio_read_32(IMX_DDRC_BASE + DENALI_PI_77) & 0x07E00000) != 0x07E00000) {
+ ;
+ }
+ } else {
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_52, 0x10000); /* CALVL */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_26, 0x100); /* WRLVL */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_33, 0x10000); /* RDGATE */
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_PI_33, 0x100); /* RDQLVL */
+ while ((mmio_read_32(IMX_DDRC_BASE + DENALI_PI_77) & 0x05E00000) != 0x05E00000) {
+ ;
+ }
+ }
+
+ dram_lp_auto_enable();
+}
+
+#define LPDDR_DONE (0x1<<4)
+#define SOC_FREQ_CHG_ACK (0x1<<6)
+#define SOC_FREQ_CHG_REQ (0x1<<7)
+#define LPI_WAKEUP_EN (0x4<<8)
+#define SOC_FREQ_REQ (0x1<<11)
+
+static void set_cgc2_ddrclk(uint8_t src, uint8_t div)
+{
+ /* Wait until the reg is unlocked for writing */
+ while (mmio_read_32(IMX_CGC2_BASE + 0x40) & BIT(31)) {
+ ;
+ }
+
+ mmio_write_32(IMX_CGC2_BASE + 0x40, (src << 28) | (div << 21));
+ /* Wait for the clock switching to complete */
+ while (!(mmio_read_32(IMX_CGC2_BASE + 0x40) & BIT(27))) {
+ ;
+ }
+}
+static void set_ddr_clk(uint32_t ddr_freq)
+{
+ /* Disable DDR clock */
+ mmio_clrbits_32(IMX_PCC5_BASE + 0x108, BIT(30));
+ switch (ddr_freq) {
+ /* boot frequency ? */
+ case 48:
+ set_cgc2_ddrclk(2, 0);
+ break;
+ /* default bypass frequency for fsp 1 */
+ case 192:
+ set_cgc2_ddrclk(0, 1);
+ break;
+ case 384:
+ set_cgc2_ddrclk(0, 0);
+ break;
+ case 264:
+ set_cgc2_ddrclk(4, 3);
+ break;
+ case 528:
+ set_cgc2_ddrclk(4, 1);
+ break;
+ default:
+ break;
+ }
+ /* Enable DDR clock */
+ mmio_setbits_32(IMX_PCC5_BASE + 0x108, BIT(30));
+
+ /* Wait until the reg is unlocked for writing */
+ while (mmio_read_32(IMX_CGC2_BASE + 0x40) & BIT(31)) {
+ ;
+ }
+}
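+
+/*
+ * Note on scaling: in lpddr4_dfs() below, rates at or above DDR_BYPASS_DRATE
+ * take the PLL path and are halved before being passed here, so assuming,
+ * for example, a 528 set point, set_ddr_clk(264) is called and clock source
+ * 4 with divider 3 is selected; rates below DDR_BYPASS_DRATE are passed
+ * through unscaled on the bypass path.
+ */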
+
+#define AVD_SIM_LPDDR_CTRL (IMX_LPAV_SIM_BASE + 0x14)
+#define AVD_SIM_LPDDR_CTRL2 (IMX_LPAV_SIM_BASE + 0x18)
+#define MAX_FSP_NUM U(3)
+#define DDR_DFS_GET_FSP_COUNT 0x10
+#define DDR_BYPASS_DRATE U(400)
+
+extern int upower_pmic_i2c_write(uint32_t reg_addr, uint32_t reg_val);
+
+/* Normally, we only switch the frequency between index 1 (bypass) and index 2 (highest) */
+int lpddr4_dfs(uint32_t freq_index)
+{
+ uint32_t lpddr_ctrl, lpddr_ctrl2;
+ uint32_t ddr_ctl_144;
+
+ /*
+ * Valid index: 0 to 2
+ * index 0: boot frequency
+ * index 1: bypass frequency
+ * index 2: highest frequency
+ */
+ if (freq_index > 2U) {
+ return -1;
+ }
+
+ /*
+ * Increase the voltage to 1.1V first, before raising the frequency
+ * and letting APD enter OD mode.
+ */
+ if (freq_index == 2U && sys_dvfs) {
+ upower_pmic_i2c_write(0x22, 0x28);
+ }
+
+ /* Enable LPI_WAKEUP_EN */
+ ddr_ctl_144 = mmio_read_32(IMX_DDRC_BASE + DENALI_CTL_144);
+ mmio_setbits_32(IMX_DDRC_BASE + DENALI_CTL_144, LPI_WAKEUP_EN);
+
+ /* put DRAM into long self-refresh & clock gating */
+ lpddr_ctrl = mmio_read_32(AVD_SIM_LPDDR_CTRL);
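+ /*
+ * Bits [10:9] hold the target frequency set point index; the [20:15]
+ * field is programmed to a fixed value of 0x28.
+ */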
+ lpddr_ctrl = (lpddr_ctrl & ~((0x3f << 15) | (0x3 << 9))) | (0x28 << 15) | (freq_index << 9);
+ mmio_write_32(AVD_SIM_LPDDR_CTRL, lpddr_ctrl);
+
+ /* Gating the clock */
+ lpddr_ctrl2 = mmio_read_32(AVD_SIM_LPDDR_CTRL2);
+ mmio_setbits_32(AVD_SIM_LPDDR_CTRL2, LPDDR_EN_CLKGATE);
+
+ /* Request frequency change */
+ mmio_setbits_32(AVD_SIM_LPDDR_CTRL, SOC_FREQ_REQ);
+
+ do {
+ lpddr_ctrl = mmio_read_32(AVD_SIM_LPDDR_CTRL);
+ if (lpddr_ctrl & SOC_FREQ_CHG_REQ) {
+ /* Bypass mode */
+ if (info->fsp_table[freq_index] < DDR_BYPASS_DRATE) {
+ /* Change to PLL bypass mode */
+ mmio_write_32(IMX_LPAV_SIM_BASE, 0x1);
+ /* change the ddr clock source & frequency */
+ set_ddr_clk(info->fsp_table[freq_index]);
+ } else {
+ /* Change to PLL unbypass mode */
+ mmio_write_32(IMX_LPAV_SIM_BASE, 0x0);
+ /* change the ddr clock source & frequency */
+ set_ddr_clk(info->fsp_table[freq_index] >> 1);
+ }
+
+ mmio_clrsetbits_32(AVD_SIM_LPDDR_CTRL, SOC_FREQ_CHG_REQ, SOC_FREQ_CHG_ACK);
+ continue;
+ }
+ } while ((lpddr_ctrl & LPDDR_DONE) != 0); /* several try? */
+
+ /* restore the original setting */
+ mmio_write_32(IMX_DDRC_BASE + DENALI_CTL_144, ddr_ctl_144);
+ mmio_write_32(AVD_SIM_LPDDR_CTRL2, lpddr_ctrl2);
+
+ /* Check the DFS result */
+ lpddr_ctrl = mmio_read_32(AVD_SIM_LPDDR_CTRL) & 0xF;
+ if (lpddr_ctrl != 0U) {
+ /* Must be something wrong, return failure */
+ return -1;
+ }
+
+ /*
+ * Decrease the BUCK3 voltage after the frequency has been lowered
+ * and APD is in ND mode.
+ */
+ if (freq_index == 1U && sys_dvfs) {
+ upower_pmic_i2c_write(0x22, 0x20);
+ }
+
+ /* DFS done successfully */
+ return 0;
+}
+
+/* On the non-primary cores, wait for the DFS to complete */
+static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
+ void *handle, void *cookie)
+{
+ uint32_t irq;
+
+ irq = plat_ic_acknowledge_interrupt();
+ if (irq < 1022U) {
+ plat_ic_end_of_interrupt(irq);
+ }
+
+ /* set the WFE done status */
+ spin_lock(&dfs_lock);
+ core_count++;
+ dsb();
+ spin_unlock(&dfs_lock);
+
+ while (in_progress) {
+ wfe();
+ }
+
+ return 0;
+}
+
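+/*
+ * SMC calling convention used by the handler below (as implied by its body;
+ * the SiP function ID routing to this handler is not shown here):
+ *   x1 - requested FSP index, or DDR_DFS_GET_FSP_COUNT to query the number
+ *        of set points (returns num_fsp and the bypass entry fsp_table[1]);
+ *   x2 - number of online CPUs; the handler waits for x2 - 1 secondary
+ *        cores to acknowledge the SGI and park in WFE before changing the
+ *        DRAM frequency;
+ *   x3 - non-zero for a system-level DVFS request, in which case the BUCK3
+ *        voltage is adjusted around the frequency change.
+ */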
+int dram_dvfs_handler(uint32_t smc_fid, void *handle,
+ u_register_t x1, u_register_t x2, u_register_t x3)
+{
+ unsigned int fsp_index = x1;
+ uint32_t online_cpus = x2 - 1;
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ /* Get the number of FSPs */
+ if (x1 == DDR_DFS_GET_FSP_COUNT) {
+ SMC_RET2(handle, num_fsp, info->fsp_table[1]);
+ }
+
+ /* start lpddr frequency scaling */
+ in_progress = true;
+ sys_dvfs = x3 ? true : false;
+ dsb();
+
+ /* Notify the other cores to wait for scaling to finish */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ /* Skip raising the SGI for the current CPU */
+ if (i != cpu_id) {
+ plat_ic_raise_el3_sgi(0x8, i);
+ }
+ }
+
+ /* Make sure all the other online CPUs are in WFE */
+ while (online_cpus != core_count) {
+ ;
+ }
+
+ /* Flush the L1/L2 cache */
+ dcsw_op_all(DCCSW);
+
+ lpddr4_dfs(fsp_index);
+
+ in_progress = false;
+ core_count = 0;
+ dsb();
+ sev();
+ isb();
+
+ SMC_RET1(handle, 0);
+}
+
+void dram_init(void)
+{
+ uint32_t flags = 0;
+ uint32_t rc;
+ unsigned int i;
+
+ /* Register the EL3 handler for DDR DVFS */
+ set_interrupt_rm_flag(flags, NON_SECURE);
+ rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
+ if (rc) {
+ panic();
+ }
+
+ info = (struct dram_timing_info *)SAVED_DRAM_DATA_BASE;
+
+ /* Get the number of supported FSPs */
+ for (i = 0; i < MAX_FSP_NUM; i++) {
+ if (!info->fsp_table[i]) {
+ break;
+ }
+ }
+
+ num_fsp = (i > MAX_FSP_NUM) ? MAX_FSP_NUM : i;
+}
diff --git a/plat/imx/imx8ulp/imx8ulp_bl31_setup.c b/plat/imx/imx8ulp/imx8ulp_bl31_setup.c
new file mode 100644
index 0000000000..696f4b65a7
--- /dev/null
+++ b/plat/imx/imx8ulp/imx8ulp_bl31_setup.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include <dram.h>
+#include <imx8_lpuart.h>
+#include <imx8ulp_caam.h>
+#include <imx_plat_common.h>
+#include <plat_imx8.h>
+#include <upower_api.h>
+#include <xrdc.h>
+
+#define MAP_BL31_TOTAL \
+ MAP_REGION_FLAT(BL31_BASE, BL31_LIMIT - BL31_BASE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO \
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+#define MAP_BL32_TOTAL MAP_REGION_FLAT(BL32_BASE, BL32_SIZE, MT_MEMORY | MT_RW)
+#define MAP_COHERENT_MEM \
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE, (BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE), \
+ MT_DEVICE | MT_RW | MT_SECURE)
+
+#define TRUSTY_PARAMS_LEN_BYTES (4096*2)
+
+static const mmap_region_t imx_mmap[] = {
+ DEVICE0_MAP, DEVICE1_MAP, DEVICE2_MAP,
+ ELE_MAP, SEC_SIM_MAP, SRAM0_MAP,
+ {0}
+};
+
+extern uint32_t upower_init(void);
+extern void imx8ulp_init_scmi_server(void);
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+
+ /* config the TPM5 clock */
+ mmio_write_32(IMX_PCC3_BASE + 0xd0, 0x92000000);
+ mmio_write_32(IMX_PCC3_BASE + 0xd0, 0xd2000000);
+
+ /* enable the GPIO D,E,F non-secure access by default */
+ mmio_write_32(IMX_PCC4_BASE + 0x78, 0xc0000000);
+ mmio_write_32(IMX_PCC4_BASE + 0x7c, 0xc0000000);
+ mmio_write_32(IMX_PCC5_BASE + 0x114, 0xc0000000);
+
+ mmio_write_32(IMX_GPIOE_BASE + 0x10, 0xffffffff);
+ mmio_write_32(IMX_GPIOE_BASE + 0x14, 0x3);
+ mmio_write_32(IMX_GPIOE_BASE + 0x18, 0xffffffff);
+ mmio_write_32(IMX_GPIOE_BASE + 0x1c, 0x3);
+
+ mmio_write_32(IMX_GPIOF_BASE + 0x10, 0xffffffff);
+ mmio_write_32(IMX_GPIOF_BASE + 0x14, 0x3);
+ mmio_write_32(IMX_GPIOF_BASE + 0x18, 0xffffffff);
+ mmio_write_32(IMX_GPIOF_BASE + 0x1c, 0x3);
+
+ mmio_write_32(IMX_GPIOD_BASE + 0x10, 0xffffffff);
+ mmio_write_32(IMX_GPIOD_BASE + 0x14, 0x3);
+ mmio_write_32(IMX_GPIOD_BASE + 0x18, 0xffffffff);
+ mmio_write_32(IMX_GPIOD_BASE + 0x1c, 0x3);
+
+ console_lpuart_register(IMX_LPUART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ IMX_CONSOLE_BAUDRATE, &console);
+
+ /* This console is used for both the boot and runtime stages */
+ console_set_scope(&console, CONSOLE_FLAG_BOOT | CONSOLE_FLAG_RUNTIME);
+
+ bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET;
+ bl33_image_ep_info.spsr = plat_get_spsr_for_bl33_entry();
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ /* Populate entry point information for BL32 */
+ SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
+ SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+ bl32_image_ep_info.pc = BL32_BASE;
+ bl32_image_ep_info.spsr = 0;
+
+ /* Pass TEE base and size to bl33 */
+ bl33_image_ep_info.args.arg1 = BL32_BASE;
+ bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+#ifdef SPD_trusty
+ bl32_image_ep_info.args.arg0 = BL32_SIZE;
+ bl32_image_ep_info.args.arg1 = BL32_BASE;
+#else
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+ const mmap_region_t bl_regions[] = {
+ MAP_BL31_TOTAL,
+ MAP_BL31_RO,
+#if USE_COHERENT_MEM
+ MAP_COHERENT_MEM,
+#endif
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ MAP_BL32_TOTAL,
+#endif
+ {0},
+ };
+
+ setup_page_tables(bl_regions, imx_mmap);
+ enable_mmu_el3(0);
+
+ /* TODO: Hack, refine this piece, scmi channel free */
+ mmio_write_32(SRAM0_BASE + 0x4, 1);
+
+ /* Allow M core to reset A core */
+ mmio_clrbits_32(IMX_MU0B_BASE + 0x10, BIT(2));
+}
+
+void bl31_platform_setup(void)
+{
+ /* select the arch timer source */
+ mmio_setbits_32(IMX_SIM1_BASE + 0x30, 0x8000000);
+
+ generic_delay_timer_init();
+
+ plat_gic_driver_init();
+ plat_gic_init();
+
+ imx8ulp_init_scmi_server();
+ upower_init();
+
+ xrdc_apply_apd_config();
+ xrdc_apply_lpav_config();
+ xrdc_enable();
+
+ imx8ulp_caam_init();
+
+ dram_init();
+}
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type)
+{
+ if (type == NON_SECURE) {
+ return &bl33_image_ep_info;
+ } else {
+ return &bl32_image_ep_info;
+ }
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return COUNTER_FREQUENCY;
+}
+
+void bl31_plat_runtime_setup(void)
+{
+}
+
+#ifdef SPD_trusty
+void plat_trusty_set_boot_args(aapcs64_params_t *args)
+{
+ args->arg0 = BL32_SIZE;
+ args->arg1 = BL32_BASE;
+ args->arg2 = TRUSTY_PARAMS_LEN_BYTES;
+}
+#endif
diff --git a/plat/imx/imx8ulp/imx8ulp_caam.c b/plat/imx/imx8ulp/imx8ulp_caam.c
new file mode 100644
index 0000000000..d150fe2c38
--- /dev/null
+++ b/plat/imx/imx8ulp/imx8ulp_caam.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021-2024 NXP.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include <imx8ulp_caam.h>
+
+void imx8ulp_caam_init(void)
+{
+ /* config CAAM JRaMID set MID to Cortex A */
+ mmio_write_32(CAAM_JR0MID, CAAM_NS_MID);
+ mmio_write_32(CAAM_JR1MID, CAAM_NS_MID);
+ mmio_write_32(CAAM_JR2MID, CAAM_NS_MID);
+ mmio_write_32(CAAM_JR3MID, CAAM_NS_MID);
+}
diff --git a/plat/imx/imx8ulp/imx8ulp_psci.c b/plat/imx/imx8ulp/imx8ulp_psci.c
new file mode 100644
index 0000000000..628aceabd0
--- /dev/null
+++ b/plat/imx/imx8ulp/imx8ulp_psci.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv3.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <plat_imx8.h>
+#include <upower_api.h>
+
+extern void cgc1_save(void);
+extern void cgc1_restore(void);
+extern void imx_apd_ctx_save(unsigned int cpu);
+extern void imx_apd_ctx_restore(unsigned int cpu);
+extern void usb_wakeup_enable(bool enable);
+extern void upower_wait_resp(void);
+extern bool is_lpav_owned_by_apd(void);
+extern void apd_io_pad_off(void);
+extern int upower_pmic_i2c_read(uint32_t reg_addr, uint32_t *reg_val);
+extern void imx8ulp_init_scmi_server(void);
+
+static uintptr_t secure_entrypoint;
+
+#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+#define RVBARADDRx(c) (IMX_SIM1_BASE + 0x5c + 0x4 * (c))
+#define WKPUx(c) (IMX_SIM1_BASE + 0x3c + 0x4 * (c))
+#define AD_COREx_LPMODE(c) (IMX_CMC1_BASE + 0x50 + 0x4 * (c))
+
+#define PMIC_CFG(v, m, msk) \
+ { \
+ .volt = (v), \
+ .mode = (m), \
+ .mode_msk = (msk), \
+ }
+
+#define PAD_CFG(c, r, t) \
+ { \
+ .pad_close = (c), \
+ .pad_reset = (r), \
+ .pad_tqsleep = (t) \
+ }
+
+#define BIAS_CFG(m, n, p, mbias) \
+ { \
+ .dombias_cfg = { \
+ .mode = (m), \
+ .rbbn = (n), \
+ .rbbp = (p), \
+ }, \
+ .membias_cfg = {mbias}, \
+ }
+
+#define SWT_BOARD(swt_on, msk) \
+ { \
+ .on = (swt_on), \
+ .mask = (msk), \
+ }
+
+#define SWT_MEM(a, p, m) \
+ { \
+ .array = (a), \
+ .perif = (p), \
+ .mask = (m), \
+ }
+
+static int imx_pwr_set_cpu_entry(unsigned int cpu, unsigned int entry)
+{
+ mmio_write_32(RVBARADDRx(cpu), entry);
+
+ /* set update bit */
+ mmio_write_32(IMX_SIM1_BASE + 0x8, mmio_read_32(IMX_SIM1_BASE + 0x8) | BIT_32(24 + cpu));
+ /* wait for ack */
+ while (!(mmio_read_32(IMX_SIM1_BASE + 0x8) & BIT_32(26 + cpu))) {
+ }
+
+ /* clear update bit */
+ mmio_write_32(IMX_SIM1_BASE + 0x8, mmio_read_32(IMX_SIM1_BASE + 0x8) & ~BIT_32(24 + cpu));
+ /* clear ack bit */
+ mmio_write_32(IMX_SIM1_BASE + 0x8, mmio_read_32(IMX_SIM1_BASE + 0x8) | BIT_32(26 + cpu));
+
+ return 0;
+}
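+
+/*
+ * RVBARADDRx(cpu) holds the reset vector of the given core. The helper above
+ * latches a new entry by setting the per-core update bit (bit 24 + cpu) in
+ * SIM1 offset 0x8, waiting for the matching ack bit (bit 26 + cpu), then
+ * clearing the update bit and the ack.
+ */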
+
+static volatile uint32_t cgc1_nicclk;
+int imx_pwr_domain_on(u_register_t mpidr)
+{
+ unsigned int cpu = MPIDR_AFFLVL0_VAL(mpidr);
+
+ imx_pwr_set_cpu_entry(cpu, secure_entrypoint);
+
+ /* slow down the APD NIC bus clock */
+ cgc1_nicclk = mmio_read_32(IMX_CGC1_BASE + 0x34);
+ mmio_clrbits_32(IMX_CGC1_BASE + 0x34, GENMASK_32(29, 28));
+
+ mmio_write_32(IMX_CMC1_BASE + 0x18, 0x3f);
+ mmio_write_32(IMX_CMC1_BASE + 0x50 + 0x4 * cpu, 0);
+
+ /* enable WKPU wakeup for idle */
+ mmio_write_32(IMX_SIM1_BASE + 0x3c + 0x4 * cpu, 0xffffffff);
+
+ return PSCI_E_SUCCESS;
+}
+
+void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ imx_pwr_set_cpu_entry(0, IMX_ROM_ENTRY);
+ plat_gic_pcpu_init();
+ plat_gic_cpuif_enable();
+
+ /* set the APD NIC clock back to its original setting */
+ mmio_write_32(IMX_CGC1_BASE + 0x34, cgc1_nicclk);
+}
+
+int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
+{
+ return PSCI_E_SUCCESS;
+}
+
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ unsigned int cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
+
+ plat_gic_cpuif_disable();
+
+ /* disable wakeup */
+ mmio_write_32(WKPUx(cpu), 0);
+
+ /* set core power mode to PD */
+ mmio_write_32(AD_COREx_LPMODE(cpu), 0x3);
+}
+
+/* APD power mode config */
+ps_apd_pwr_mode_cfgs_t apd_pwr_mode_cfgs = {
+ [DPD_PWR_MODE] = {
+ .swt_board_offs = 0x180,
+ .swt_mem_offs = 0x188,
+ .pmic_cfg = PMIC_CFG(0x23, 0x0, 0x2),
+ .pad_cfg = PAD_CFG(0x0, 0xc, 0x01e80a02),
+ .bias_cfg = BIAS_CFG(0x0, 0x2, 0x2, 0x0),
+ },
+
+ /* PD */
+ [PD_PWR_MODE] = {
+ .swt_board_offs = 0x170,
+ .swt_mem_offs = 0x178,
+ .pmic_cfg = PMIC_CFG(0x23, 0x0, 0x2),
+ .pad_cfg = PAD_CFG(0x0, 0xc, 0x01e80a00),
+ .bias_cfg = BIAS_CFG(0x0, 0x2, 0x2, 0x0),
+ },
+
+ [ADMA_PWR_MODE] = {
+ .swt_board_offs = 0x120,
+ .swt_mem_offs = 0x128,
+ .pmic_cfg = PMIC_CFG(0x23, 0x0, 0x2),
+ .pad_cfg = PAD_CFG(0x0, 0x0, 0x0deb7a00),
+ .bias_cfg = BIAS_CFG(0x2, 0x2, 0x2, 0x0),
+ },
+
+ [ACT_PWR_MODE] = {
+ .swt_board_offs = 0x110,
+ .swt_mem_offs = 0x118,
+ .pmic_cfg = PMIC_CFG(0x23, 0x0, 0x2),
+ .pad_cfg = PAD_CFG(0x0, 0x0, 0x0deb7a00),
+ .bias_cfg = BIAS_CFG(0x2, 0x2, 0x2, 0x0),
+ },
+};
+
+/* APD power switch config */
+ps_apd_swt_cfgs_t apd_swt_cfgs = {
+ [DPD_PWR_MODE] = {
+ .swt_board[0] = SWT_BOARD(0x0, 0x1fffc),
+ .swt_mem[0] = SWT_MEM(0x0, 0x0, 0x1ffff),
+ .swt_mem[1] = SWT_MEM(0x003fffff, 0x003fffff, 0x0),
+ },
+
+ [PD_PWR_MODE] = {
+ .swt_board[0] = SWT_BOARD(0x0, 0x00001fffc),
+ .swt_mem[0] = SWT_MEM(0x00010c00, 0x0, 0x1ffff),
+ .swt_mem[1] = SWT_MEM(0x003fffff, 0x003f0000, 0x0),
+ },
+
+ [ADMA_PWR_MODE] = {
+ .swt_board[0] = SWT_BOARD(0x15f74, 0x15f74),
+ .swt_mem[0] = SWT_MEM(0x0001fffd, 0x0001fffd, 0x1ffff),
+ .swt_mem[1] = SWT_MEM(0x003fffff, 0x003fffff, 0x0),
+ },
+
+ [ACT_PWR_MODE] = {
+ .swt_board[0] = SWT_BOARD(0x15f74, 0x15f74),
+ .swt_mem[0] = SWT_MEM(0x0001fffd, 0x0001fffd, 0x1ffff),
+ .swt_mem[1] = SWT_MEM(0x003fffff, 0x003fffff, 0x0),
+ },
+};
+
+/* PMIC config for power down, LDO1 should be OFF */
+ps_apd_pmic_reg_data_cfgs_t pd_pmic_reg_cfgs = {
+ [0] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = PD_PWR_MODE,
+ .i2c_addr = 0x30,
+ .i2c_data = 0x9c,
+ },
+ [1] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = PD_PWR_MODE,
+ .i2c_addr = 0x22,
+ .i2c_data = 0xb,
+ },
+ [2] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = ACT_PWR_MODE,
+ .i2c_addr = 0x30,
+ .i2c_data = 0x9d,
+ },
+ [3] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = ACT_PWR_MODE,
+ .i2c_addr = 0x22,
+ .i2c_data = 0x28,
+ },
+};
+
+/* PMIC config for deep power down, BUCK3 should be OFF */
+ps_apd_pmic_reg_data_cfgs_t dpd_pmic_reg_cfgs = {
+ [0] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = DPD_PWR_MODE,
+ .i2c_addr = 0x21,
+ .i2c_data = 0x78,
+ },
+ [1] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = DPD_PWR_MODE,
+ .i2c_addr = 0x30,
+ .i2c_data = 0x9c,
+ },
+ [2] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = ACT_PWR_MODE,
+ .i2c_addr = 0x21,
+ .i2c_data = 0x79,
+ },
+ [3] = {
+ .tag = PMIC_REG_VALID_TAG,
+ .power_mode = ACT_PWR_MODE,
+ .i2c_addr = 0x30,
+ .i2c_data = 0x9d,
+ },
+};
+
+struct ps_pwr_mode_cfg_t *pwr_sys_cfg = (struct ps_pwr_mode_cfg_t *)UPWR_DRAM_SHARED_BASE_ADDR;
+
+void imx_set_pwr_mode_cfg(abs_pwr_mode_t mode)
+{
+ uint32_t volt;
+
+ if (mode >= NUM_PWR_MODES) {
+ return;
+ }
+
+ /* apd power mode config */
+ memcpy(&pwr_sys_cfg->ps_apd_pwr_mode_cfg[mode], &apd_pwr_mode_cfgs[mode],
+ sizeof(struct ps_apd_pwr_mode_cfg_t));
+
+ /* apd power switch config */
+ memcpy(&pwr_sys_cfg->ps_apd_swt_cfg[mode], &apd_swt_cfgs[mode], sizeof(swt_config_t));
+
+ /*
+ * BUCK3 & LDO1 can only be shut down when the LPAV domain is owned by
+ * the APD side; otherwise the RTD side is responsible for controlling
+ * them in low power mode.
+ */
+ if (is_lpav_owned_by_apd()) {
+ /* power off the BUCK3 in DPD mode */
+ if (mode == DPD_PWR_MODE) {
+ memcpy(&pwr_sys_cfg->ps_apd_pmic_reg_data_cfg, &dpd_pmic_reg_cfgs,
+ sizeof(ps_apd_pmic_reg_data_cfgs_t));
+ /* LDO1 should be powered off in PD mode */
+ } else if (mode == PD_PWR_MODE) {
+ /* overwrite the buck3 voltage setting in active mode */
+ upower_pmic_i2c_read(0x22, &volt);
+ pd_pmic_reg_cfgs[3].i2c_data = volt;
+ memcpy(&pwr_sys_cfg->ps_apd_pmic_reg_data_cfg, &pd_pmic_reg_cfgs,
+ sizeof(ps_apd_pmic_reg_data_cfgs_t));
+ }
+ }
+}
+
+void imx_domain_suspend(const psci_power_state_t *target_state)
+{
+ unsigned int cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
+
+ if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ plat_gic_cpuif_disable();
+ imx_pwr_set_cpu_entry(cpu, secure_entrypoint);
+ /* put the core into power down */
+ mmio_write_32(IMX_CMC1_BASE + 0x50 + 0x4 * cpu, 0x3);
+ /* FIXME config wakeup interrupt in WKPU */
+ mmio_write_32(IMX_SIM1_BASE + 0x3c + 0x4 * cpu, 0x7fffffe3);
+ } else {
+ /* for core standby/retention mode */
+ mmio_write_32(IMX_CMC1_BASE + 0x50 + 0x4 * cpu, 0x1);
+ mmio_write_32(IMX_SIM1_BASE + 0x3c + 0x4 * cpu, 0x7fffffe3);
+ dsb();
+ write_scr_el3(read_scr_el3() | SCR_FIQ_BIT);
+ isb();
+ }
+
+ if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
+ /*
+ * Just handle sleep mode for now; this needs to be updated to
+ * support more modes, as does the suspend finish callback.
+ */
+ mmio_write_32(IMX_CMC1_BASE + 0x10, 0x1);
+ mmio_write_32(IMX_CMC1_BASE + 0x20, 0x1);
+
+ } else if (is_local_state_off(CLUSTER_PWR_STATE(target_state))) {
+ /*
+ * For the cluster off state, put the cluster into power down mode
+ * and configure the cluster clock to be off.
+ */
+ mmio_write_32(IMX_CMC1_BASE + 0x10, 0x7);
+ mmio_write_32(IMX_CMC1_BASE + 0x20, 0xf);
+ }
+
+ if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
+ /*
+ * Low power mode config info used by uPower to perform the
+ * low power mode transition.
+ */
+ imx_set_pwr_mode_cfg(ADMA_PWR_MODE);
+ imx_set_pwr_mode_cfg(ACT_PWR_MODE);
+ imx_set_pwr_mode_cfg(PD_PWR_MODE);
+
+ /* clear the upower wakeup */
+ upwr_xcp_set_rtd_apd_llwu(APD_DOMAIN, 0, NULL);
+ upower_wait_resp();
+
+ /* enable the USB wakeup */
+ usb_wakeup_enable(true);
+
+ /* config the WUU to enable the wakeup sources */
+ mmio_write_32(IMX_PCC3_BASE + 0x98, 0xc0800000);
+
+ /* make sure all pending pad wakeup events are cleared */
+ mmio_write_32(IMX_WUU1_BASE + 0x20, 0xffffffff);
+
+ /* enable upower usb phy wakeup by default */
+ mmio_setbits_32(IMX_WUU1_BASE + 0x18, BIT(4) | BIT(1) | BIT(0));
+
+ /* enable all pad wakeups by default */
+ mmio_write_32(IMX_WUU1_BASE + 0x8, 0xffffffff);
+
+ /* save the APD domain context before entering PD mode */
+ imx_apd_ctx_save(cpu);
+ }
+}
+
+#define DRAM_LPM_STATUS U(0x2802b004)
+void imx_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ unsigned int cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
+
+ if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
+ /* restore the APD domain context */
+ imx_apd_ctx_restore(cpu);
+
+ /* clear the upower wakeup */
+ upwr_xcp_set_rtd_apd_llwu(APD_DOMAIN, 0, NULL);
+ upower_wait_resp();
+
+ /* disable all pad wakeup */
+ mmio_write_32(IMX_WUU1_BASE + 0x8, 0x0);
+
+ /* clear all the pad wakeup pending event */
+ mmio_write_32(IMX_WUU1_BASE + 0x20, 0xffffffff);
+
+ /*
+ * Disable the USB wakeup after resume to make sure the pending
+ * USB wakeup in the WUU can be cleared successfully; otherwise
+ * APD will fail to resume from the next PD mode.
+ */
+ usb_wakeup_enable(false);
+
+ /* re-init the SCMI channel */
+ imx8ulp_init_scmi_server();
+ }
+
+ /*
+ * Wait until the DDR is ready when it is under RTD side control
+ * for power saving.
+ */
+ while (mmio_read_32(DRAM_LPM_STATUS) != 0) {
+ ;
+ }
+
+ /*
+ * When resuming from low power mode, delay for a while before
+ * accessing the CMC registers.
+ */
+ udelay(5);
+
+ /* clear cluster's LPM setting. */
+ mmio_write_32(IMX_CMC1_BASE + 0x20, 0x0);
+ mmio_write_32(IMX_CMC1_BASE + 0x10, 0x0);
+
+ /* clear core's LPM setting */
+ mmio_write_32(IMX_CMC1_BASE + 0x50 + 0x4 * cpu, 0x0);
+ mmio_write_32(IMX_SIM1_BASE + 0x3c + 0x4 * cpu, 0x0);
+
+ if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ imx_pwr_set_cpu_entry(0, IMX_ROM_ENTRY);
+ plat_gic_cpuif_enable();
+ } else {
+ dsb();
+ write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT));
+ isb();
+ }
+}
+
+void __dead2 imx8ulp_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
+{
+ while (1) {
+ wfi();
+ }
+}
+
+void __dead2 imx8ulp_system_reset(void)
+{
+ imx_pwr_set_cpu_entry(0, IMX_ROM_ENTRY);
+
+ /* Write invalid command to WDOG CNT to trigger reset */
+ mmio_write_32(IMX_WDOG3_BASE + 0x4, 0x12345678);
+
+ while (true) {
+ wfi();
+ }
+}
+
+int imx_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int pwr_type = psci_get_pstate_type(power_state);
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ if (pwr_type == PSTATE_TYPE_STANDBY) {
+ CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ }
+
+ /* No power down state support */
+ if (pwr_type == PSTATE_TYPE_POWERDOWN) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ unsigned int i;
+
+ for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
+ req_state->pwr_domain_state[i] = PLAT_POWER_DOWN_OFF_STATE;
+ }
+}
+
+void __dead2 imx_system_off(void)
+{
+ unsigned int i;
+
+ /* configure all the cores into OFF mode with IRQs masked */
+ for (i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ /* disable wakeup from wkpu */
+ mmio_write_32(WKPUx(i), 0x0);
+
+ /* reset the core reset entry to 0x1000 */
+ imx_pwr_set_cpu_entry(i, 0x1000);
+
+ /* config the core power mode to off */
+ mmio_write_32(AD_COREx_LPMODE(i), 0x3);
+ }
+
+ plat_gic_cpuif_disable();
+
+ /* power off all the pads */
+ apd_io_pad_off();
+
+ /* Config the power mode info for entering DPD mode and ACT mode */
+ imx_set_pwr_mode_cfg(ADMA_PWR_MODE);
+ imx_set_pwr_mode_cfg(ACT_PWR_MODE);
+ imx_set_pwr_mode_cfg(DPD_PWR_MODE);
+
+ /* Set the APD domain into DPD mode */
+ mmio_write_32(IMX_CMC1_BASE + 0x10, 0x7);
+ mmio_write_32(IMX_CMC1_BASE + 0x20, 0x1f);
+
+ /* make sure no pending upower wakeup */
+ upwr_xcp_set_rtd_apd_llwu(APD_DOMAIN, 0, NULL);
+ upower_wait_resp();
+
+ /* enable the uPower wakeup from the WUU; it acts as the APD boot up method */
+ mmio_write_32(IMX_PCC3_BASE + 0x98, 0xc0800000);
+ mmio_setbits_32(IMX_WUU1_BASE + 0x18, BIT(4));
+
+ /* make sure no pad wakeup event is pending */
+ mmio_write_32(IMX_WUU1_BASE + 0x20, 0xffffffff);
+
+ wfi();
+
+ ERROR("power off failed.\n");
+ panic();
+}
+
+static const plat_psci_ops_t imx_plat_psci_ops = {
+ .pwr_domain_on = imx_pwr_domain_on,
+ .pwr_domain_on_finish = imx_pwr_domain_on_finish,
+ .validate_ns_entrypoint = imx_validate_ns_entrypoint,
+ .system_off = imx_system_off,
+ .system_reset = imx8ulp_system_reset,
+ .pwr_domain_off = imx_pwr_domain_off,
+ .pwr_domain_suspend = imx_domain_suspend,
+ .pwr_domain_suspend_finish = imx_domain_suspend_finish,
+ .get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+ .validate_power_state = imx_validate_power_state,
+ .pwr_domain_pwr_down_wfi = imx8ulp_pwr_domain_pwr_down_wfi,
+};
+
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ secure_entrypoint = sec_entrypoint;
+ imx_pwr_set_cpu_entry(0, sec_entrypoint);
+ *psci_ops = &imx_plat_psci_ops;
+
+ mmio_write_32(IMX_CMC1_BASE + 0x18, 0x3f);
+ mmio_write_32(IMX_SIM1_BASE + 0x3c, 0xffffffff);
+
+ return 0;
+}
diff --git a/plat/imx/imx8ulp/include/dram.h b/plat/imx/imx8ulp/include/dram.h
new file mode 100644
index 0000000000..9ed8969a1e
--- /dev/null
+++ b/plat/imx/imx8ulp/include/dram.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef DRAM_H
+#define DRAM_H
+
+void dram_init(void);
+
+#endif /* DRAM_H */
+
diff --git a/plat/imx/imx8ulp/include/imx8ulp_caam.h b/plat/imx/imx8ulp/include/imx8ulp_caam.h
new file mode 100644
index 0000000000..1b93d7dd08
--- /dev/null
+++ b/plat/imx/imx8ulp/include/imx8ulp_caam.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2021-2024 NXP.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8ULP_CAAM_H
+#define IMX8ULP_CAAM_H
+
+#include <lib/utils_def.h>
+
+#include <platform_def.h>
+
+#define CAAM_JR0MID (IMX_CAAM_BASE + 0x10)
+#define CAAM_JR1MID (IMX_CAAM_BASE + 0x18)
+#define CAAM_JR2MID (IMX_CAAM_BASE + 0x20)
+#define CAAM_JR3MID (IMX_CAAM_BASE + 0x28)
+#define CAAM_NS_MID (0x7)
+
+#define JR0_BASE (IMX_CAAM_BASE + 0x1000)
+
+void imx8ulp_caam_init(void);
+
+#endif /* IMX8ULP_CAAM_H */
diff --git a/plat/imx/imx8ulp/include/platform_def.h b/plat/imx/imx8ulp/include/platform_def.h
new file mode 100644
index 0000000000..20c5851ba5
--- /dev/null
+++ b/plat/imx/imx8ulp/include/platform_def.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <lib/utils_def.h>
+
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define PLATFORM_STACK_SIZE 0x400
+#define CACHE_WRITEBACK_GRANULE 64
+
+#define PLAT_PRIMARY_CPU 0x0
+#define PLATFORM_MAX_CPU_PER_CLUSTER 2
+#define PLATFORM_CLUSTER_COUNT 1
+#define PLATFORM_CORE_COUNT 2
+#define PLATFORM_CLUSTER0_CORE_COUNT 2
+#define PLATFORM_CLUSTER1_CORE_COUNT 0
+
+#define IMX_PWR_LVL0 MPIDR_AFFLVL0
+#define IMX_PWR_LVL1 MPIDR_AFFLVL1
+#define IMX_PWR_LVL2 MPIDR_AFFLVL2
+
+#define PWR_DOMAIN_AT_MAX_LVL U(1)
+#define PLAT_MAX_PWR_LVL U(2)
+
+#define PLAT_SLEEP_RET_STATE U(1)
+#define PLAT_DEEP_SLEEP_RET_STATE U(2)
+#define PLAT_MAX_RET_STATE U(3)
+
+#define PLAT_POWER_DOWN_OFF_STATE U(4)
+#define PLAT_DEEP_POWER_DOWN_STATE U(5)
+#define PLAT_MAX_OFF_STATE U(6)
+
+#define BL31_BASE 0x20040000
+#define BL31_LIMIT 0x20070000
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
+
+#ifdef SPD_trusty
+#define MAX_XLAT_TABLES 11
+#define MAX_MMAP_REGIONS 12
+#else
+#define MAX_XLAT_TABLES 10
+#define MAX_MMAP_REGIONS 11
+#endif
+
+#define PLAT_GICD_BASE U(0x2d400000)
+#define PLAT_GICR_BASE U(0x2d440000)
+#define DEVICE0_BASE U(0x20000000)
+#define DEVICE0_SIZE U(0x10000000)
+#define DEVICE1_BASE U(0x30000000)
+#define DEVICE1_SIZE U(0x10000000)
+#define DEVICE2_BASE U(0x8ff00000)
+#define DEVICE2_SIZE U(0x00001000)
+#define IMX_LPUART4_BASE U(0x29390000)
+#define IMX_LPUART5_BASE U(0x293a0000)
+#define IMX_LPUART_BASE IMX_LPUART5_BASE
+#define IMX_CAAM_BASE U(0x292e0000)
+#define IMX_BOOT_UART_CLK_IN_HZ 24000000
+#define IMX_CONSOLE_BAUDRATE 115200
+
+#define IMX_CGC1_BASE U(0x292c0000)
+#define IMX_PCC3_BASE U(0x292d0000)
+#define IMX_PCC4_BASE U(0x29800000)
+#define IMX_SIM2_BASE U(0x2da50000)
+#define IMX_CGC2_BASE U(0x2da60000)
+#define IMX_PCC5_BASE U(0x2da70000)
+#define IMX_MU0B_BASE U(0x29220000)
+#define IMX_CMC1_BASE U(0x29240000)
+#define IMX_WUU1_BASE U(0x29260000)
+#define IMX_SIM1_BASE U(0x29290000)
+#define IMX_GPIOD_BASE U(0x2e200000)
+#define IMX_GPIOE_BASE U(0x2d000000)
+#define IMX_GPIOF_BASE U(0x2d010000)
+#define IMX_WDOG3_BASE U(0x292a0000)
+#define IMX_TPM5_BASE U(0x29340000)
+
+#define SRAM0_BASE U(0x2201F000)
+
+#define IOMUXC_PTD_PCR_BASE U(0x298c0000)
+#define IOMUXC_PTE_PCR_BASE U(0x298c0080)
+#define IOMUXC_PTF_PCR_BASE U(0x298c0100)
+#define IOMUXC_PSMI_BASE0 U(0x298c0800)
+#define IOMUXC_PSMI_BASE1 U(0x298c0838)
+#define IOMUXC_PSMI_BASE2 U(0x298c0954)
+#define IOMUXC_PSMI_BASE3 U(0x298c0994)
+#define IOMUXC_PSMI_BASE4 U(0x298c0a58)
+
+#define IMX_ROM_ENTRY U(0x1000)
+#define COUNTER_FREQUENCY 1000000
+
+#define PLAT_NS_IMAGE_OFFSET 0x80200000
+
+#define BL31_NOBITS_BASE 0x20058000
+#define BL31_NOBITS_LIMIT 0x2006d000
+
+#define BL31_RWDATA_BASE 0x2006d000
+#define BL31_RWDATA_LIMIT 0x20070000
+
+#define BL32_FDT_OVERLAY_ADDR 0x9d000000
+
+#ifdef SPD_trusty
+#define IMX_TRUSTY_STACK_SIZE 0x100
+#endif
+
+/* system memory map define */
+#define DEVICE0_MAP MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE, MT_DEVICE | MT_RW)
+#define DEVICE1_MAP MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW)
+/* Map partial DRAM space for DRAM low-power mode control */
+#define DEVICE2_MAP MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, MT_DEVICE | MT_RW)
+ /* MU and FSB */
+#define ELE_MAP MAP_REGION_FLAT(0x27010000, 0x20000, MT_DEVICE | MT_RW | MT_NS)
+#define SEC_SIM_MAP MAP_REGION_FLAT(0x2802B000, 0x1000, MT_DEVICE | MT_RW | MT_NS) /* SEC SIM */
+/* For SCMI shared memory region */
+#define SRAM0_MAP MAP_REGION_FLAT(SRAM0_BASE, 0x1000, MT_RW | MT_DEVICE)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/imx/imx8ulp/include/scmi.h b/plat/imx/imx8ulp/include/scmi.h
new file mode 100644
index 0000000000..03e16f5e83
--- /dev/null
+++ b/plat/imx/imx8ulp/include/scmi.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8_SCMI_H
+#define IMX8_SCMI_H
+
+#include <stdint.h>
+
+#define SCMI_SHMEM_CHANNEL_ERROR BIT_32(1)
+#define SCMI_SHMEM_CHANNEL_FREE BIT_32(0)
+
+#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT_32(0)
+
+enum scmi_std_protocol {
+ SCMI_PROTOCOL_BASE = 0x10,
+ SCMI_PROTOCOL_POWER_DOMAIN = 0x11,
+ SCMI_PROTOCOL_SYS_POWER = 0x12,
+ SCMI_PROTOCOL_PERF_DOMAIN = 0x13,
+ SCMI_PROTOCOL_CLK = 0x14,
+ SCMI_PROTOCOL_SENSOR = 0x15,
+ SCMI_PROTOCOL_RESET_DOMAIN = 0x16,
+};
+
+#define MSG_ID(m) ((m) & 0xff)
+#define MSG_TYPE(m) (((m) >> 8) & 0x3)
+#define MSG_PRO_ID(m) (((m) >> 10) & 0xff)
+#define MSG_TOKEN(m) (((m) >> 18) & 0x3ff)
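+
+/*
+ * Worked example: a header value of 0x4404 decodes to MSG_PRO_ID = 0x11
+ * (SCMI_PROTOCOL_POWER_DOMAIN), MSG_ID = 4 (POWER_STATE_SET), with
+ * MSG_TYPE and MSG_TOKEN both 0.
+ */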
+
+enum {
+ SCMI_POWER_DOMAIN_PROTOCOL = 0x11,
+ SCMI_SYS_PWR_DOMAIN_PROTOCOL = 0x12,
+ SCMI_PER_DOMAIN_PROTOCOL = 0x13,
+ SCMI_CLK_DOMAIN_PROTOCOL = 0x14,
+ SCMI_SENSOR_PROTOCOL = 0x15,
+};
+
+#define PROTOCOL_VERSION 0
+#define PROTOCOL_ATTRIBUTES 1
+#define PROTOCOL_MESSAGE_ATTRIBUTES 2
+#define BASE_DISCOVER_VENDOR 3
+#define BASE_DISCOVER_SUB_VENDOR 4
+#define BASE_DISCOVER_IMPLEMENTATION_VERSION 5
+#define BASE_DISCOVER_LIST_PROTOCOLS 6
+#define BASE_DISCOVER_AGENT 7
+#define BASE_NOTIFY_ERRORS 8
+#define BASE_SET_DEVICE_PERMISSIONS 9
+#define BASE_SET_PROTOCOL_PERMISSIONS 0xA
+#define BASE_RESET_AGENT_CONFIGURATION 0xB
+
+enum {
+ SCMI_RET_SUCCESS = 0,
+ SCMI_RET_NOT_SUPPORTED = -1,
+ SCMI_RET_INVALID_PARAMETERS = -2,
+ SCMI_RET_DENIED = -3,
+ SCMI_RET_NOT_FOUND = -4,
+ SCMI_RET_OUT_OF_RANGE = -5,
+ SCMI_RET_BUSY = -6,
+ SCMI_RET_COMMS_ERROR = -7,
+ SCMI_RET_GENERIC_ERROR = -8,
+ SCMI_RET_HARDWARE_ERROR = -9,
+ SCMI_RET_PROTOCOL_ERROR = -10,
+};
+
+#define POWER_DOMAIN_ATTRIBUTES 3
+#define POWER_DOMAIN_SUPPORT_NOTIFICATION BIT(31)
+#define POWER_DOMAIN_SUPPORT_ASYNCHRONOUS BIT(30)
+#define POWER_DOMAIN_SUPPORT_SYNCHRONOUS BIT(29)
+
+#define POWER_STATE_SET 4
+#define POWER_STATE_GET 5
+#define POWER_STATE_NOTIFY 6
+#define POWER_STATE_CHANGE_REQUESTED_NOTIFY 7
+
+int scmi_power_domain_handler(uint32_t msg_id, void *shmem);
+
+#define PERFORMANCE_DOMAIN_ATTRIBUTES 3
+#define PERFORMANCE_DESCRIBE_LEVELS 4
+#define PERFORMANCE_LIMITS_SET 5
+#define PERFORMANCE_LIMITS_GET 6
+#define PERFORMANCE_LEVEL_SET 7
+#define PERFORMANCE_LEVEL_GET 8
+#define PERFORMANCE_NOTIFY_LIMITS 9
+#define PERFORMANCE_NOTIFY_LEVEL 0xA
+#define PERFORMANCE_DESCRIBE_FAST_CHANNEL 0xB
+
+int scmi_perf_domain_handler(uint32_t msg_id, void *shmem);
+
+#define SENSOR_DESCRIPTION_GET 0x003
+#define SENSOR_CONFIG_SET 0x004
+#define SENSOR_TRIP_POINT_SET 0x005
+#define SENSOR_READING_GET 0x006
+
+int scmi_sensor_handler(uint32_t msg_id, void *shmem);
+
+#define SMC_SHMEM_BASE 0x2201f000
+
+#endif /* IMX8_SCMI_H */
diff --git a/plat/imx/imx8ulp/include/scmi_sensor.h b/plat/imx/imx8ulp/include/scmi_sensor.h
new file mode 100644
index 0000000000..5dab898697
--- /dev/null
+++ b/plat/imx/imx8ulp/include/scmi_sensor.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Description:
+ * System Control and Management Interface (SCMI) support.
+ */
+
+#ifndef INTERNAL_SCMI_SENSOR_H
+#define INTERNAL_SCMI_SENSOR_H
+
+#include <stdint.h>
+
+#define SCMI_PROTOCOL_VERSION_SENSOR UINT32_C(0x10000)
+
+/*
+ * PROTOCOL_ATTRIBUTES
+ */
+struct scmi_sensor_protocol_attributes_p2a {
+ int32_t status;
+ uint32_t attributes;
+ uint32_t sensor_reg_address_low;
+ uint32_t sensor_reg_address_high;
+ uint32_t sensor_reg_len;
+};
+
+/*
+ * SENSOR_READING_GET
+ */
+#define SCMI_SENSOR_PROTOCOL_READING_GET_ASYNC_FLAG_MASK (1 << 0)
+
+struct scmi_sensor_protocol_reading_get_a2p {
+ uint32_t sensor_id;
+ uint32_t flags;
+};
+
+struct scmi_sensor_protocol_reading_get_p2a {
+ int32_t status;
+ uint32_t sensor_value_low;
+ uint32_t sensor_value_high;
+};
+
+/*
+ * SENSOR_DESCRIPTION_GET
+ */
+ #define SCMI_SENSOR_DESCS_MAX(MAILBOX_SIZE) \
+ ((sizeof(struct scmi_sensor_protocol_description_get_p2a) < MAILBOX_SIZE) \
+ ? ((MAILBOX_SIZE - \
+ sizeof(struct scmi_sensor_protocol_description_get_p2a)) \
+ / sizeof(struct scmi_sensor_desc)) \
+ : 0)
+
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_TYPE_POS 0
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_POS 11
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_POS 22
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_POS 27
+
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_TYPE_MASK \
+ (UINT32_C(0xFF) << SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_TYPE_POS)
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MASK \
+ (UINT32_C(0x1F) << SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_POS)
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_MASK \
+ (UINT32_C(0x1F) << \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_POS)
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_MASK \
+ (UINT32_C(0x1F) << SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_POS)
+
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MAX \
+ (int32_t)(SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MASK >> 1)
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MIN \
+ (-(SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MAX + 1))
+
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_MAX \
+ (int32_t)(SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_MASK >> 1)
+#define SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_MIN \
+ (-(SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_MAX + 1))
+
+#define SCMI_SENSOR_DESC_ATTRIBUTES_HIGH(SENSOR_TYPE, UNIT_MULTIPLIER, \
+ UPDATE_MULTIPLIER, UPDATE_INTERVAL) \
+ ( \
+ (((SENSOR_TYPE) << \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_TYPE_POS) & \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_TYPE_MASK) | \
+ (((UNIT_MULTIPLIER) << \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_POS) & \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UNIT_MULTIPLIER_MASK) | \
+ (((UPDATE_MULTIPLIER) << \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_POS) & \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_MULTIPLIER_MASK) | \
+ (((UPDATE_INTERVAL) << \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_POS) & \
+ SCMI_SENSOR_DESC_ATTRS_HIGH_SENSOR_UPDATE_INTERVAL_MASK) \
+ )
+
+#define SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_DESCS_POS 0
+#define SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_REMAINING_DESCS_POS 16
+
+#define SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_DESCS_MASK \
+ (UINT32_C(0xFFF) << SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_DESCS_POS)
+#define SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_REMAINING_DESCS_MASK \
+ (UINT32_C(0xFFFF) << SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_REMAINING_DESCS_POS)
+
+#define SCMI_SENSOR_NUM_SENSOR_FLAGS(NUM_DESCS, NUM_REMAINING_DESCS) \
+ ( \
+ (((NUM_DESCS) << \
+ SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_DESCS_POS) & \
+ SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_DESCS_MASK) | \
+ (((NUM_REMAINING_DESCS) << \
+ SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_REMAINING_DESCS_POS) & \
+ SCMI_SENSOR_NUM_SENSOR_FLAGS_NUM_REMAINING_DESCS_MASK) \
+ )
+
+#define SCMI_SENSOR_NAME_LEN 16
+
+struct scmi_sensor_desc {
+ uint32_t sensor_id;
+ uint32_t sensor_attributes_low;
+ uint32_t sensor_attributes_high;
+ char sensor_name[SCMI_SENSOR_NAME_LEN];
+};
+
+struct scmi_sensor_protocol_description_get_a2p {
+ uint32_t desc_index;
+};
+
+struct scmi_sensor_protocol_description_get_p2a {
+ int32_t status;
+ uint32_t num_sensor_flags;
+ struct scmi_sensor_desc sensor_desc[];
+};
+
+/* Event indices */
+enum scmi_sensor_api_idx {
+ SCMI_SENSOR_EVENT_IDX_REQUEST,
+ SCMI_SENSOR_EVENT_IDX_COUNT,
+};
+
+#endif /* INTERNAL_SCMI_SENSOR_H */
diff --git a/plat/imx/imx8ulp/include/xrdc.h b/plat/imx/imx8ulp/include/xrdc.h
new file mode 100644
index 0000000000..15250f033f
--- /dev/null
+++ b/plat/imx/imx8ulp/include/xrdc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IMX8ULP_XRDC_H
+#define IMX8ULP_XRDC_H
+
+#define DID_MAX 8
+#define PAC_SLOT_ALL 128
+#define MSC_SLOT_ALL 8
+
+enum xrdc_mda_sa {
+ MDA_SA_S,
+ MDA_SA_NS,
+ MDA_SA_PT, /* pass through master's secure/nonsecure attribute */
+};
+
+struct xrdc_mda_config {
+ uint16_t mda_id;
+ uint16_t did;
+ enum xrdc_mda_sa sa;
+};
+
+struct xrdc_pac_msc_config {
+ uint16_t pac_msc_id;
+ uint16_t slot_id;
+ uint8_t dsel[DID_MAX];
+};
+
+struct xrdc_mrc_config {
+ uint16_t mrc_id;
+ uint16_t region_id;
+ uint32_t region_start;
+ uint32_t region_size;
+ uint8_t dsel[DID_MAX];
+ uint16_t accset[2];
+};
+
+/* APIs to apply and enable XRDC */
+int xrdc_apply_lpav_config(void);
+int xrdc_apply_hifi_config(void);
+int xrdc_apply_apd_config(void);
+void xrdc_enable(void);
+
+#endif
diff --git a/plat/imx/imx8ulp/platform.mk b/plat/imx/imx8ulp/platform.mk
new file mode 100644
index 0000000000..f1e53ca962
--- /dev/null
+++ b/plat/imx/imx8ulp/platform.mk
@@ -0,0 +1,69 @@
+#
+# Copyright 2021-2024 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+PLAT_INCLUDES := -Iplat/imx/imx8ulp/include \
+ -Iplat/imx/common/include \
+ -Iplat/imx/imx8ulp/upower
+
+IMX_GIC_SOURCES := ${GICV3_SOURCES} \
+ plat/common/plat_gicv3.c \
+ plat/common/plat_psci_common.c \
+ plat/imx/common/plat_imx8_gic.c
+
+BL31_SOURCES += plat/imx/common/lpuart_console.S \
+ plat/imx/common/imx8_helpers.S \
+ plat/imx/imx8ulp/imx8ulp_bl31_setup.c \
+ plat/imx/imx8ulp/imx8ulp_psci.c \
+ plat/imx/imx8ulp/apd_context.c \
+ plat/imx/common/imx8_topology.c \
+ plat/imx/common/imx_sip_svc.c \
+ plat/imx/common/imx_sip_handler.c \
+ plat/imx/common/imx_bl31_common.c \
+ plat/common/plat_psci_common.c \
+ lib/cpus/aarch64/cortex_a35.S \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ plat/imx/imx8ulp/xrdc/xrdc_core.c \
+ plat/imx/imx8ulp/imx8ulp_caam.c \
+ plat/imx/imx8ulp/dram.c \
+ drivers/scmi-msg/base.c \
+ drivers/scmi-msg/entry.c \
+ drivers/scmi-msg/smt.c \
+ drivers/scmi-msg/power_domain.c \
+ drivers/scmi-msg/sensor.c \
+ plat/imx/imx8ulp/scmi/scmi.c \
+ plat/imx/imx8ulp/scmi/scmi_pd.c \
+ plat/imx/imx8ulp/scmi/scmi_sensor.c \
+ plat/imx/imx8ulp/upower/upower_api.c \
+ plat/imx/imx8ulp/upower/upower_hal.c \
+ ${XLAT_TABLES_LIB_SRCS} \
+ ${IMX_GIC_SOURCES}
+
+ifeq ($(findstring clang,$(notdir $(CC))),)
+ TF_CFLAGS_aarch64 += -fno-strict-aliasing
+endif
+
+USE_COHERENT_MEM := 1
+RESET_TO_BL31 := 1
+SEPARATE_NOBITS_REGION := 1
+SEPARATE_RWDATA_REGION := 1
+PROGRAMMABLE_RESET_ADDRESS := 1
+COLD_BOOT_SINGLE_CPU := 1
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
+BL32_BASE ?= 0xa6000000
+BL32_SIZE ?= 0x2000000
+$(eval $(call add_define,BL32_BASE))
+$(eval $(call add_define,BL32_SIZE))
+
+ifeq (${SPD},trusty)
+ BL31_CFLAGS += -DPLAT_XLAT_TABLES_DYNAMIC=1
+endif
diff --git a/plat/imx/imx8ulp/scmi/scmi.c b/plat/imx/imx8ulp/scmi/scmi.c
new file mode 100644
index 0000000000..5d3e7d7283
--- /dev/null
+++ b/plat/imx/imx8ulp/scmi/scmi.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <assert.h>
+#include <stdint.h>
+
+#include <drivers/scmi-msg.h>
+#include <drivers/scmi.h>
+
+#include <platform_def.h>
+
+#define SMT_BUFFER_BASE 0x2201f000
+#define SMT_BUFFER0_BASE SMT_BUFFER_BASE
+#define SMT_BUFFER1_BASE (SMT_BUFFER_BASE + 0x200)
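+
+/*
+ * SMT_BUFFER_BASE matches SRAM0_BASE (0x2201F000) in platform_def.h, which
+ * is mapped as device memory through SRAM0_MAP, so the agent channel shared
+ * memory below is reachable once the MMU is enabled.
+ */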
+
+static struct scmi_msg_channel scmi_channel[] = {
+ [0] = {
+ .shm_addr = SMT_BUFFER0_BASE,
+ .shm_size = SMT_BUF_SLOT_SIZE,
+ },
+};
+
+struct scmi_msg_channel *plat_scmi_get_channel(unsigned int agent_id)
+{
+ assert(agent_id < ARRAY_SIZE(scmi_channel));
+
+ return &scmi_channel[agent_id];
+}
+
+static const char vendor[] = "NXP";
+static const char sub_vendor[] = "";
+
+const char *plat_scmi_vendor_name(void)
+{
+ return vendor;
+}
+
+const char *plat_scmi_sub_vendor_name(void)
+{
+ return sub_vendor;
+}
+
+/* Currently supporting Power Domains and Sensors */
+static const uint8_t plat_protocol_list[] = {
+ SCMI_PROTOCOL_ID_POWER_DOMAIN,
+ SCMI_PROTOCOL_ID_SENSOR,
+ 0U /* Null termination */
+};
+
+size_t plat_scmi_protocol_count(void)
+{
+ return ARRAY_SIZE(plat_protocol_list) - 1U;
+}
+
+const uint8_t *plat_scmi_protocol_list(unsigned int agent_id __unused)
+{
+ return plat_protocol_list;
+}
+
+void imx8ulp_init_scmi_server(void)
+{
+ size_t i;
+
+ for (i = 0U; i < ARRAY_SIZE(scmi_channel); i++) {
+ scmi_smt_init_agent_channel(&scmi_channel[i]);
+ }
+}
diff --git a/plat/imx/imx8ulp/scmi/scmi_pd.c b/plat/imx/imx8ulp/scmi/scmi_pd.c
new file mode 100644
index 0000000000..8e7e5d6635
--- /dev/null
+++ b/plat/imx/imx8ulp/scmi/scmi_pd.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <inttypes.h>
+#include <lib/libc/errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <common/debug.h>
+#include <drivers/scmi.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <platform_def.h>
+#include <scmi.h>
+
+#include <upower_api.h>
+
+#define POWER_STATE_ON (0 << 30)
+#define POWER_STATE_OFF (1 << 30)
+
+extern bool is_lpav_owned_by_apd(void);
+
+enum {
+ PS0 = 0,
+ PS1 = 1,
+ PS2 = 2,
+ PS3 = 3,
+ PS4 = 4,
+ PS5 = 5,
+ PS6 = 6,
+ PS7 = 7,
+ PS8 = 8,
+ PS9 = 9,
+ PS10 = 10,
+ PS11 = 11,
+ PS12 = 12,
+ PS13 = 13,
+ PS14 = 14,
+ PS15 = 15,
+ PS16 = 16,
+ PS17 = 17,
+ PS18 = 18,
+ PS19 = 19,
+};
+
+#define SRAM_DMA1 BIT(6)
+#define SRAM_FLEXSPI2 BIT(7)
+#define SRAM_USB0 BIT(10)
+#define SRAM_USDHC0 BIT(11)
+#define SRAM_USDHC1 BIT(12)
+#define SRAM_USDHC2_USB1 BIT(13)
+#define SRAM_DCNANO GENMASK_32(18, 17)
+#define SRAM_EPDC GENMASK_32(20, 19)
+#define SRAM_DMA2 BIT(21)
+#define SRAM_GPU2D GENMASK_32(23, 22)
+#define SRAM_GPU3D GENMASK_32(25, 24)
+#define SRAM_HIFI4 BIT(26)
+#define SRAM_ISI_BUFFER BIT(27)
+#define SRAM_MIPI_CSI_FIFO BIT(28)
+#define SRAM_MIPI_DSI_FIFO BIT(29)
+#define SRAM_PXP BIT(30)
+
+#define SRAM_DMA0 BIT_64(33)
+#define SRAM_FLEXCAN BIT_64(34)
+#define SRAM_FLEXSPI0 BIT_64(35)
+#define SRAM_FLEXSPI1 BIT_64(36)
+
+struct psw {
+ char *name;
+ uint32_t reg;
+ int power_state;
+ uint32_t count;
+ int flags;
+};
+
+#define ALWAYS_ON BIT(0)
+
+static struct psw imx8ulp_psw[] = {
+ [PS6] = { .name = "PS6", .reg = PS6, .flags = ALWAYS_ON, .power_state = POWER_STATE_ON },
+ [PS7] = { .name = "PS7", .reg = PS7, .power_state = POWER_STATE_OFF },
+ [PS8] = { .name = "PS8", .reg = PS8, .power_state = POWER_STATE_OFF },
+ [PS13] = { .name = "PS13", .reg = PS13, .power_state = POWER_STATE_OFF },
+ [PS14] = { .name = "PS14", .reg = PS14, .flags = ALWAYS_ON, .power_state = POWER_STATE_OFF },
+ [PS15] = { .name = "PS15", .reg = PS15, .power_state = POWER_STATE_OFF },
+ [PS16] = { .name = "PS16", .reg = PS16, .flags = ALWAYS_ON, .power_state = POWER_STATE_ON },
+};
+
+struct power_domain {
+ char *name;
+ uint32_t reg;
+ uint32_t psw_parent;
+ uint32_t sram_parent;
+ uint64_t bits;
+ uint32_t power_state;
+ bool lpav; /* belong to lpav domain */
+ uint32_t sw_rst_reg; /* pcc sw reset reg offset */
+};
+
+/* The rich OS must use the same power domain IDs as the macros below */
+#define IMX8ULP_PD_DMA1 0
+#define IMX8ULP_PD_FLEXSPI2 1
+#define IMX8ULP_PD_USB0 2
+#define IMX8ULP_PD_USDHC0 3
+#define IMX8ULP_PD_USDHC1 4
+#define IMX8ULP_PD_USDHC2_USB1 5
+#define IMX8ULP_PD_DCNANO 6
+#define IMX8ULP_PD_EPDC 7
+#define IMX8ULP_PD_DMA2 8
+#define IMX8ULP_PD_GPU2D 9
+#define IMX8ULP_PD_GPU3D 10
+#define IMX8ULP_PD_HIFI4 11
+#define IMX8ULP_PD_ISI 12
+#define IMX8ULP_PD_MIPI_CSI 13
+#define IMX8ULP_PD_MIPI_DSI 14
+#define IMX8ULP_PD_PXP 15
+
+#define IMX8ULP_PD_PS6 16
+#define IMX8ULP_PD_PS7 17
+#define IMX8ULP_PD_PS8 18
+#define IMX8ULP_PD_PS13 19
+#define IMX8ULP_PD_PS14 20
+#define IMX8ULP_PD_PS15 21
+#define IMX8ULP_PD_PS16 22
+#define IMX8ULP_PD_MAX 23
+
+/* LPAV peripheral PCC */
+#define PCC_GPU2D (IMX_PCC5_BASE + 0xf0)
+#define PCC_GPU3D (IMX_PCC5_BASE + 0xf4)
+#define PCC_EPDC (IMX_PCC5_BASE + 0xcc)
+#define PCC_CSI (IMX_PCC5_BASE + 0xbc)
+#define PCC_PXP (IMX_PCC5_BASE + 0xd0)
+
+#define PCC_SW_RST BIT(28)
+
+#define PWR_DOMAIN(_name, _reg, _psw_parent, _sram_parent, \
+ _bits, _state, _lpav, _rst_reg) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .psw_parent = _psw_parent, \
+ .sram_parent = _sram_parent, \
+ .bits = _bits, \
+ .power_state = _state, \
+ .lpav = _lpav, \
+ .sw_rst_reg = _rst_reg, \
+ }
+
+static struct power_domain scmi_power_domains[] = {
+ PWR_DOMAIN("DMA1", IMX8ULP_PD_DMA1, PS6, PS6, SRAM_DMA1, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("FLEXSPI2", IMX8ULP_PD_FLEXSPI2, PS6, PS6, SRAM_FLEXSPI2, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("USB0", IMX8ULP_PD_USB0, PS6, PS6, SRAM_USB0, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("USDHC0", IMX8ULP_PD_USDHC0, PS6, PS6, SRAM_USDHC0, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("USDHC1", IMX8ULP_PD_USDHC1, PS6, PS6, SRAM_USDHC1, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("USDHC2_USB1", IMX8ULP_PD_USDHC2_USB1, PS6, PS6, SRAM_USDHC2_USB1, POWER_STATE_OFF, false, 0U),
+ PWR_DOMAIN("DCNano", IMX8ULP_PD_DCNANO, PS16, PS16, SRAM_DCNANO, POWER_STATE_OFF, true, 0U),
+ PWR_DOMAIN("EPDC", IMX8ULP_PD_EPDC, PS13, PS13, SRAM_EPDC, POWER_STATE_OFF, true, PCC_EPDC),
+ PWR_DOMAIN("DMA2", IMX8ULP_PD_DMA2, PS16, PS16, SRAM_DMA2, POWER_STATE_OFF, true, 0U),
+ PWR_DOMAIN("GPU2D", IMX8ULP_PD_GPU2D, PS16, PS16, SRAM_GPU2D, POWER_STATE_OFF, true, PCC_GPU2D),
+ PWR_DOMAIN("GPU3D", IMX8ULP_PD_GPU3D, PS7, PS7, SRAM_GPU3D, POWER_STATE_OFF, true, PCC_GPU3D),
+ PWR_DOMAIN("HIFI4", IMX8ULP_PD_HIFI4, PS8, PS8, SRAM_HIFI4, POWER_STATE_OFF, true, 0U),
+ PWR_DOMAIN("ISI", IMX8ULP_PD_ISI, PS16, PS16, SRAM_ISI_BUFFER, POWER_STATE_OFF, true, 0U),
+ PWR_DOMAIN("MIPI_CSI", IMX8ULP_PD_MIPI_CSI, PS15, PS16, SRAM_MIPI_CSI_FIFO, POWER_STATE_OFF, true, PCC_CSI),
+ PWR_DOMAIN("MIPI_DSI", IMX8ULP_PD_MIPI_DSI, PS14, PS16, SRAM_MIPI_DSI_FIFO, POWER_STATE_OFF, true, 0U),
+ PWR_DOMAIN("PXP", IMX8ULP_PD_PXP, PS13, PS13, SRAM_PXP | SRAM_EPDC, POWER_STATE_OFF, true, PCC_PXP)
+};
+
+size_t plat_scmi_pd_count(unsigned int agent_id __unused)
+{
+ return ARRAY_SIZE(scmi_power_domains);
+}
+
+const char *plat_scmi_pd_get_name(unsigned int agent_id __unused,
+ unsigned int pd_id)
+{
+ if (pd_id >= IMX8ULP_PD_PS6) {
+ return imx8ulp_psw[pd_id - IMX8ULP_PD_PS6].name;
+ }
+
+ return scmi_power_domains[pd_id].name;
+}
+
+unsigned int plat_scmi_pd_get_state(unsigned int agent_id __unused,
+ unsigned int pd_id __unused)
+{
+ if (pd_id >= IMX8ULP_PD_PS6) {
+ return imx8ulp_psw[pd_id - IMX8ULP_PD_PS6].power_state;
+ }
+
+ return scmi_power_domains[pd_id].power_state;
+}
+
+extern void upower_wait_resp(void);
+int upwr_pwm_power(const uint32_t swton[], const uint32_t memon[], bool on)
+{
+ int ret_val;
+ int ret;
+
+ if (on == true) {
+ ret = upwr_pwm_power_on(swton, memon, NULL);
+ } else {
+ ret = upwr_pwm_power_off(swton, memon, NULL);
+ }
+
+ if (ret != 0U) {
+ WARN("%s failed: ret: %d, state: %x\n", __func__, ret, on);
+ return ret;
+ }
+
+ upower_wait_resp();
+
+ ret = upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, NULL, &ret_val, 1000);
+ if (ret != UPWR_REQ_OK) {
+ WARN("Failure %d, %s\n", ret, __func__);
+ if (ret == UPWR_REQ_BUSY) {
+ return -EBUSY;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int32_t plat_scmi_pd_psw(unsigned int index, unsigned int state)
+{
+ uint32_t psw_parent = scmi_power_domains[index].psw_parent;
+ uint32_t sram_parent = scmi_power_domains[index].sram_parent;
+ uint64_t swt;
+ bool on;
+ int ret = 0;
+
+ if ((imx8ulp_psw[psw_parent].flags & ALWAYS_ON) != 0U &&
+ (imx8ulp_psw[sram_parent].flags & ALWAYS_ON) != 0U) {
+ return 0;
+ }
+
+ on = (state == POWER_STATE_ON) ? true : false;
+
+ if ((imx8ulp_psw[psw_parent].flags & ALWAYS_ON) == 0U) {
+ swt = 1 << imx8ulp_psw[psw_parent].reg;
+ if (imx8ulp_psw[psw_parent].count == 0U) {
+ if (on == false) {
+ WARN("off PSW[%d] that already in off state\n", psw_parent);
+ ret = -EACCES;
+ } else {
+ ret = upwr_pwm_power((const uint32_t *)&swt, NULL, on);
+ imx8ulp_psw[psw_parent].count++;
+ }
+ } else {
+ if (on == true) {
+ imx8ulp_psw[psw_parent].count++;
+ } else {
+ imx8ulp_psw[psw_parent].count--;
+ }
+
+ if (imx8ulp_psw[psw_parent].count == 0U) {
+ ret = upwr_pwm_power((const uint32_t *)&swt, NULL, on);
+ }
+ }
+ }
+
+ if (!(imx8ulp_psw[sram_parent].flags & ALWAYS_ON) && (psw_parent != sram_parent)) {
+ swt = 1 << imx8ulp_psw[sram_parent].reg;
+ if (imx8ulp_psw[sram_parent].count == 0U) {
+ if (on == false) {
+ WARN("off PSW[%d] that already in off state\n", sram_parent);
+ ret = -EACCES;
+ } else {
+ ret = upwr_pwm_power((const uint32_t *)&swt, NULL, on);
+ imx8ulp_psw[sram_parent].count++;
+ }
+ } else {
+ if (on == true) {
+ imx8ulp_psw[sram_parent].count++;
+ } else {
+ imx8ulp_psw[sram_parent].count--;
+ }
+
+ if (imx8ulp_psw[sram_parent].count == 0U) {
+ ret = upwr_pwm_power((const uint32_t *)&swt, NULL, on);
+ }
+ }
+ }
+
+ return ret;
+}
+
+bool pd_allow_power_off(unsigned int pd_id)
+{
+ if (scmi_power_domains[pd_id].lpav) {
+ if (!is_lpav_owned_by_apd()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void assert_pcc_reset(unsigned int pcc)
+{
+ /* if sw_rst_reg is valid, assert the pcc reset */
+ if (pcc != 0U) {
+ mmio_clrbits_32(pcc, PCC_SW_RST);
+ }
+}
+
+int32_t plat_scmi_pd_set_state(unsigned int agent_id __unused,
+ unsigned int flags,
+ unsigned int pd_id,
+ unsigned int state)
+{
+ unsigned int ps_idx;
+ uint64_t mem;
+ bool on;
+ int ret;
+
+ if (flags != 0U || pd_id >= IMX8ULP_PD_PS6) {
+ return SCMI_NOT_SUPPORTED;
+ }
+
+ ps_idx = 0;
+ while (ps_idx < IMX8ULP_PD_PS6 && scmi_power_domains[ps_idx].reg != pd_id) {
+ ps_idx++;
+ }
+
+ if (ps_idx == IMX8ULP_PD_PS6) {
+ return SCMI_NOT_FOUND;
+ }
+
+ if (state == scmi_power_domains[ps_idx].power_state) {
+ return SCMI_SUCCESS;
+ }
+
+ mem = scmi_power_domains[ps_idx].bits;
+ on = (state == POWER_STATE_ON ? true : false);
+ if (on == true) {
+ /* Assert pcc sw reset if necessary */
+ assert_pcc_reset(scmi_power_domains[ps_idx].sw_rst_reg);
+
+ ret = plat_scmi_pd_psw(ps_idx, state);
+ if (ret != 0U) {
+ return SCMI_DENIED;
+ }
+
+ ret = upwr_pwm_power(NULL, (const uint32_t *)&mem, on);
+ if (ret != 0U) {
+ return SCMI_DENIED;
+ }
+ } else {
+ if (!pd_allow_power_off(ps_idx)) {
+ return SCMI_DENIED;
+ }
+
+ ret = upwr_pwm_power(NULL, (const uint32_t *)&mem, on);
+ if (ret != 0U) {
+ return SCMI_DENIED;
+ }
+
+ ret = plat_scmi_pd_psw(ps_idx, state);
+ if (ret != 0U) {
+ return SCMI_DENIED;
+ }
+ }
+
+ scmi_power_domains[pd_id].power_state = state;
+
+ return SCMI_SUCCESS;
+}
diff --git a/plat/imx/imx8ulp/scmi/scmi_sensor.c b/plat/imx/imx8ulp/scmi/scmi_sensor.c
new file mode 100644
index 0000000000..6976b2e2b3
--- /dev/null
+++ b/plat/imx/imx8ulp/scmi/scmi_sensor.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/libc/errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../../drivers/scmi-msg/sensor.h"
+
+#include <common/debug.h>
+#include <drivers/scmi.h>
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+#include <scmi.h>
+
+#include <upower_api.h>
+
+/* Only Temperature now */
+static uint16_t imx_scmi_sensor_count(unsigned int agent_id __unused)
+{
+ return 1U;
+}
+
+uint8_t imx_scmi_sensor_max_requests(unsigned int agent_id __unused)
+{
+ return 1U;
+}
+
+extern int upower_read_temperature(uint32_t sensor_id, int32_t *temperature);
+int imx_scmi_sensor_reading_get(uint32_t agent_id __unused, uint16_t sensor_id __unused,
+ uint32_t *val)
+{
+ int32_t temperature;
+ int ret;
+
+ ret = upower_read_temperature(1, &temperature);
+ if (ret != 0U) {
+ val[0] = 0xFFFFFFFF;
+ } else {
+ val[0] = temperature;
+ }
+
+ val[1] = 0;
+ val[2] = 0;
+ val[3] = 0;
+
+ return ret;
+}
+
+#define SCMI_SENSOR_NAME_LENGTH_MAX 16U
+
+uint32_t imx_scmi_sensor_state(uint32_t agent_id __unused, uint16_t sensor_id __unused)
+{
+ return 1U;
+}
+
+uint32_t imx_scmi_sensor_description_get(uint32_t agent_id __unused, uint16_t desc_index __unused,
+					 struct scmi_sensor_desc *desc)
+{
+ desc->id = 0;
+ desc->attr_low = 0;
+ desc->attr_high = 2;
+ strlcpy((char *)desc->name, "UPOWER-TEMP", 12);
+ desc->power = 0;
+ desc->resolution = 0;
+ desc->min_range_low = 0;
+ desc->min_range_high = 0x80000000;
+ desc->max_range_low = 0xffffffff;
+ desc->max_range_high = 0x7fffffff;
+
+ return 1U;
+}
+
+REGISTER_SCMI_SENSOR_OPS(imx_scmi_sensor_count,
+ imx_scmi_sensor_max_requests,
+ NULL,
+ imx_scmi_sensor_reading_get,
+ imx_scmi_sensor_description_get,
+ NULL,
+ imx_scmi_sensor_state,
+ NULL);
diff --git a/plat/imx/imx8ulp/upower/upmu.h b/plat/imx/imx8ulp/upower/upmu.h
new file mode 100644
index 0000000000..ce4f47ed98
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upmu.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2021-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MU_H
+#define MU_H
+
+#include <stdint.h>
+
+typedef volatile unsigned int vuint32_t;
+
+/****************************************************************************/
+/* MODULE: Message Unit */
+/****************************************************************************/
+/* VER Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t FEATURE : 16;
+ vuint32_t MINOR : 8;
+ vuint32_t MAJOR : 8;
+ } B;
+} MU_VER_t;
+
+/* PAR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t TR_NUM : 8;
+ vuint32_t RR_NUM : 8;
+ vuint32_t GIR_NUM : 8;
+ vuint32_t FLAG_WIDTH : 8;
+ } B;
+} MU_PAR_t;
+
+/* CR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t MUR : 1;
+ vuint32_t MURIE : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_CR_t;
+
+/* SR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t MURS : 1;
+ vuint32_t MURIP : 1;
+ vuint32_t EP : 1;
+ vuint32_t FUP : 1;
+ vuint32_t GIRP : 1;
+ vuint32_t TEP : 1;
+ vuint32_t RFP : 1;
+ vuint32_t CEP : 1;
+ vuint32_t rsrv_1 : 24;
+
+ } B;
+} MU_SR_t;
+
+/* CCR0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t NMI : 1;
+ vuint32_t HR : 1;
+ vuint32_t HRM : 1;
+ vuint32_t CLKE : 1;
+ vuint32_t RSTH : 1;
+ vuint32_t BOOT : 2;
+ vuint32_t rsrv_1 : 25;
+
+ } B;
+} MU_CCR0_t;
+
+/* CIER0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t rsrv_1 : 1;
+ vuint32_t HRIE : 1;
+ vuint32_t RUNIE : 1;
+ vuint32_t RAIE : 1;
+ vuint32_t HALTIE : 1;
+ vuint32_t WAITIE : 1;
+ vuint32_t STOPIE : 1;
+ vuint32_t PDIE : 1;
+ vuint32_t rsrv_2 : 24;
+ } B;
+} MU_CIER0_t;
+
+/* CSSR0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t NMIC : 1;
+ vuint32_t HRIP : 1;
+ vuint32_t RUN : 1;
+ vuint32_t RAIP : 1;
+ vuint32_t HALT : 1;
+ vuint32_t WAIT : 1;
+ vuint32_t STOP : 1;
+ vuint32_t PD : 1;
+ vuint32_t rsrv_1 : 24;
+ } B;
+} MU_CSSR0_t;
+
+/* CSR0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t rsrv_1 : 1;
+ vuint32_t HRIP : 1;
+ vuint32_t RUN : 1;
+ vuint32_t RAIP : 1;
+ vuint32_t HALT : 1;
+ vuint32_t WAIT : 1;
+ vuint32_t STOP : 1;
+ vuint32_t PD : 1;
+ vuint32_t rsrv_2 : 24;
+ } B;
+} MU_CSR0_t;
+
+/* FCR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t F0 : 1;
+ vuint32_t F1 : 1;
+ vuint32_t F2 : 1;
+ vuint32_t rsrv_1 : 29;
+ } B;
+} MU_FCR_t;
+
+/* FSR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t F0 : 1;
+ vuint32_t F1 : 1;
+ vuint32_t F2 : 1;
+ vuint32_t rsrv_1 : 29;
+ } B;
+} MU_FSR_t;
+
+/* GIER Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t GIE0 : 1;
+ vuint32_t GIE1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_GIER_t;
+
+/* GCR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t GIR0 : 1;
+ vuint32_t GIR1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_GCR_t;
+
+/* GSR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t GIP0 : 1;
+ vuint32_t GIP1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_GSR_t;
+
+/* TCR Register */
+typedef union{
+ vuint32_t R;
+ struct {
+ vuint32_t TIE0 : 1;
+ vuint32_t TIE1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_TCR_t;
+
+/* TSR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t TE0 : 1;
+ vuint32_t TE1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_TSR_t;
+
+/* RCR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t RIE0 : 1;
+ vuint32_t RIE1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_RCR_t;
+
+/* RSR Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t RF0 : 1;
+ vuint32_t RF1 : 1;
+ vuint32_t rsrv_1 : 30;
+ } B;
+} MU_RSR_t;
+
+/* TR0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t TR_DATA : 32;
+ } B;
+} MU_TR0_t;
+
+/* TR1 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t TR_DATA : 32;
+ } B;
+} MU_TR1_t;
+
+/* RR0 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t RR_DATA : 32;
+ } B;
+} MU_RR0_t;
+
+/* RR1 Register */
+typedef union {
+ vuint32_t R;
+ struct {
+ vuint32_t RR_DATA : 32;
+ } B;
+} MU_RR1_t;
+
+struct MU_t {
+ MU_VER_t VER;
+ MU_PAR_t PAR;
+ MU_CR_t CR;
+ MU_SR_t SR;
+ MU_CCR0_t CCR0;
+ MU_CIER0_t CIER0;
+ MU_CSSR0_t CSSR0;
+ MU_CSR0_t CSR0;
+ uint8_t MU_reserved0[224];
+ MU_FCR_t FCR;
+ MU_FSR_t FSR;
+ uint8_t MU_reserved1[8];
+ MU_GIER_t GIER;
+ MU_GCR_t GCR;
+ MU_GSR_t GSR;
+ uint8_t MU_reserved2[4];
+ MU_TCR_t TCR;
+ MU_TSR_t TSR;
+ MU_RCR_t RCR;
+ MU_RSR_t RSR;
+ uint8_t MU_reserved3[208];
+ MU_TR0_t TR[2];
+ uint8_t MU_reserved4[120];
+ MU_RR0_t RR[2];
+};
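+
+/*
+ * Layout note: the MU_reservedN byte arrays pad the structure so the fields
+ * land on the register offsets implied above, e.g. FCR at offset 0x100
+ * (8 x 4-byte registers + 224 reserved bytes), TCR at 0x120, TR[0] at 0x200
+ * and RR[0] at 0x280.
+ */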
+
+#endif /* MU_H */
diff --git a/plat/imx/imx8ulp/upower/upower_api.c b/plat/imx/imx8ulp/upower/upower_api.c
new file mode 100644
index 0000000000..ce8c1c8436
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upower_api.c
@@ -0,0 +1,3095 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/**
+ * Copyright 2019-2024 NXP
+ *
+ * KEYWORDS: micro-power uPower driver API
+ */
+
+#include <string.h>
+
+#include "upower_api.h"
+#include "upower_soc_defs.h"
+
+/* ---------------------------------------------------------------
+ * Common Macros
+ * ---------------------------------------------------------------
+ */
+
+/* tests Service Group busy */
+#define UPWR_SG_BUSY(sg) ((sg_busy & (1U << (sg))) != 0U)
+
+/* install user callback for the Service Group */
+#define UPWR_USR_CALLB(sg, cb) { user_callback[(sg)] = (cb); }
+
+/* fills up common message header info */
+#define UPWR_MSG_HDR(hdr, sg, fn) { \
+ (hdr).domain = (uint32_t)pwr_domain; \
+ (hdr).srvgrp = (sg); \
+ (hdr).function = (fn); }
+
+/* ---------------------------------------------------------------
+ * Common Data Structures
+ * ---------------------------------------------------------------
+ */
+static soc_domain_t pwr_domain;
+
+static upwr_code_vers_t fw_rom_version;
+static upwr_code_vers_t fw_ram_version;
+static uint32_t fw_launch_option;
+
+/* shared memory buffers */
+#define UPWR_API_BUFFER_SIZE (MAX_SG_EXCEPT_MEM_SIZE + \
+ MAX_SG_PWRMGMT_MEM_SIZE + MAX_SG_VOLTM_MEM_SIZE)
+
+/* service group shared mem buffer pointers */
+static void *sh_buffer[UPWR_SG_COUNT];
+
+/* Callbacks registered for each service group :
+ *
+ * NULL means no callback is registered;
+ * for sgrp_callback, it also means the service group is
+ * free to receive a new request.
+ */
+static upwr_callb user_callback[UPWR_SG_COUNT];
+static UPWR_RX_CALLB_FUNC_T sgrp_callback[UPWR_SG_COUNT];
+
+/* request data structures for each service group */
+/* message waiting for TX */
+static upwr_down_max_msg sg_req_msg[UPWR_SG_COUNT];
+/* waiting message size */
+static unsigned int sg_req_siz[UPWR_SG_COUNT];
+/* response msg */
+static upwr_up_max_msg sg_rsp_msg[UPWR_SG_COUNT];
+/* response msg size */
+static unsigned int sg_rsp_siz[UPWR_SG_COUNT];
+
+/* tx pending status for each (1 bit per service group) */
+static volatile uint32_t sg_tx_pend;
+/* serv.group of current ongoing Tx, if any */
+static volatile upwr_sg_t sg_tx_curr;
+
+/* service group busy status, only for this domain (MU index 0) */
+/* SG bit = 1 if group is busy with a request */
+static volatile uint32_t sg_busy;
+
+/* OS-dependent memory allocation function */
+static upwr_malloc_ptr_t os_malloc;
+/* OS-dependent pointer->physical address conversion function */
+static upwr_phyadr_ptr_t os_ptr2phy;
+/* OS-dependent function to lock critical code */
+static upwr_lock_ptr_t os_lock;
+
+/* pointer to MU structure */
+static struct MU_t *mu;
+
+/*
+ * indicates that a transmission was done and is pending; this
+ * bit is necessary because the Tx and Rx interrupts are ORed
+ * together, and there is no way of telling if only Rx interrupt
+ * or both occurred just by looking at the MU status registers
+ */
+static uint32_t mu_tx_pend;
+
+static UPWR_TX_CALLB_FUNC_T mu_tx_callb;
+static UPWR_RX_CALLB_FUNC_T mu_rx_callb;
+
+#define UPWR_API_INIT_WAIT (0U) /* waiting for ROM firmware initialization */
+#define UPWR_API_INITLZED (1U) /* ROM firmware initialized */
+#define UPWR_API_START_WAIT (2U) /* waiting for start services */
+#define UPWR_API_SHUTDOWN_WAIT (3U) /* waiting for shutdown */
+#define UPWR_API_READY (4U) /* ready to receive service requests */
+
+volatile upwr_api_state_t api_state;
+
+/* default pointer->physical address conversion, returns the same address */
+static void *ptr2phys(const void *ptr)
+{
+ return (void *)ptr;
+}
+
+/* ---------------------------------------------------------------
+ * SHARED MEMORY MANAGEMENT
+ * --------------------------------------------------------------
+ */
+
+/*
+ * upwr_ptr2offset() - converts a pointer (casted to uint64_t) to an
+ * address offset from the shared memory start. If it does not point
+ * to a shared memory location, the structure pointed is copied to a
+ * buffer in the shared memory, and the buffer offset is returned.
+ * The 2nd argument is the service group to which the buffer belongs;
+ * The 3rd argument is the size of structure to be copied. The 4th argument
+ * is an offset to apply to the copy destination address. The 5th argument
+ * is ptr before the conversion to physical address. 2nd, 3rd, 4th and 5th
+ * arguments are not used if the 1st one points to a location inside the
+ * shared memory.
+ */
+
+static uint32_t upwr_ptr2offset(unsigned long ptr,
+ upwr_sg_t sg,
+ size_t siz,
+ size_t offset,
+ const void *vptr)
+{
+ if ((ptr >= UPWR_DRAM_SHARED_BASE_ADDR) &&
+ ((ptr - UPWR_DRAM_SHARED_BASE_ADDR) < UPWR_DRAM_SHARED_SIZE)) {
+ return (uint32_t)(ptr - UPWR_DRAM_SHARED_BASE_ADDR);
+ }
+
+ /* pointer is outside the shared memory, copy the struct to buffer */
+ (void)memcpy((void *)(offset + (char *)sh_buffer[sg]), (void *)vptr, siz);
+ return (uint32_t)((unsigned long)sh_buffer[sg] + offset - UPWR_DRAM_SHARED_BASE_ADDR);
+}
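+
+/*
+ * Example (illustrative, with made-up numbers): if sh_buffer[sg] is at
+ * UPWR_DRAM_SHARED_BASE_ADDR + 0x40 and the caller passes a pointer outside
+ * the shared memory window, the pointed structure is copied to
+ * sh_buffer[sg] + offset and the returned value is 0x40 + offset; a pointer
+ * already inside the window is simply converted to
+ * (ptr - UPWR_DRAM_SHARED_BASE_ADDR) with no copy.
+ */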
+
+/*
+ * ---------------------------------------------------------------
+ * INTERRUPTS AND CALLBACKS
+ * Service-group specific callbacks are in their own sections
+ * --------------------------------------------------------------
+ */
+
+/*
+ * upwr_lock()- locks (lock=1) or unlocks (lock=0) a critical code section;
+ * for now it only needs to protect a portion of the code from being
+ * interrupted by the MU.
+ */
+static void upwr_lock(int lock)
+{
+ if (os_lock != NULL) {
+ os_lock(lock);
+ }
+}
+
+/* upwr_exp_isr()- handles the exception interrupt from uPower */
+static void upwr_exp_isr(void)
+{
+}
+
+/* upwr_copy2tr prototype; function definition in auxiliary function section */
+void upwr_copy2tr(struct MU_t *local_mu, const uint32_t *msg, unsigned int size);
+
+#define UPWR_MU_TSR_EMPTY ((uint32_t)((1UL << UPWR_MU_MSG_SIZE) - 1UL))
+
+/* upwr_txrx_isr()- handles both the Tx and Rx MU interrupts */
+void upwr_txrx_isr(void)
+{
+ /* Tx pending and TX register empty */
+ if ((mu_tx_pend != 0UL) && (mu->TSR.R == UPWR_MU_TSR_EMPTY)) {
+ mu_tx_pend = 0UL;
+ /* disable the tx interrupts */
+ mu->TCR.R = 0U;
+ /* urgency flag off, in case it was set */
+ mu->FCR.B.F0 = 0U;
+
+ if (mu_tx_callb != NULL) {
+ mu_tx_callb();
+ }
+ }
+
+ /* RX ISR occurred */
+ if (mu->RSR.R != 0UL) {
+ /* disable the interrupt until data is read */
+ mu->RCR.R = 0U;
+
+ if (mu_rx_callb != NULL) {
+ mu_rx_callb();
+ }
+ }
+}
+
+/**
+ * upwr_next_req() - sends the next pending service request message, if any.
+ *
+ * Called upon MU Tx interrupts, it checks if there is any service request
+ * pending amongst the service groups, and sends the request if needed.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void).
+ */
+static void upwr_next_req(void)
+{
+ upwr_sg_t sg = (upwr_sg_t)0U;
+
+ /* no lock needed here, this is called from an MU ISR */
+ sg_tx_pend &= ~((uint32_t)1UL << sg_tx_curr); /* no longer pending */
+
+ if (sg_tx_pend == 0U) {
+ return; /* no other pending */
+ }
+
+ /* find the next one pending */
+ for (uint32_t mask = 1UL; mask < (1UL << UPWR_SG_COUNT); mask = mask << 1UL) {
+ if ((sg_tx_pend & mask) != 0U) {
+ break;
+ }
+
+ sg = (upwr_sg_t)(sg + 1U);
+ }
+
+ sg_tx_curr = sg;
+ if (upwr_tx((uint32_t *)&sg_req_msg[sg], sg_req_siz[sg], upwr_next_req) < 0) {
+ return; /* leave the Tx pending */
+ }
+}
+
+/**
+ * upwr_mu_int_callback() - general MU interrupt callback.
+ *
+ * Called upon MU Rx interrupts, it calls the Service Group-specific callback,
+ * if any registered, based on the service group field in the received message.
+ * Otherwise, calls the user callback, if any registered.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void).
+ */
+static void upwr_mu_int_callback(void)
+{
+ upwr_sg_t sg; /* service group number */
+ UPWR_RX_CALLB_FUNC_T sg_callb; /* service group callback */
+ upwr_up_max_msg rxmsg = {0};
+ unsigned int size; /* in words */
+
+ if (upwr_rx((char *)&rxmsg, &size) < 0) {
+ return;
+ }
+
+ sg = (upwr_sg_t)rxmsg.hdr.srvgrp;
+
+ /* copy msg to the service group buffer */
+ msg_copy((char *)&sg_rsp_msg[sg], (char *)&rxmsg, size);
+ sg_rsp_siz[sg] = size;
+
+ /* clear the service group busy status */
+ sg_busy &= ~(1UL << sg); /* no lock needed here, we're in the MU ISR */
+
+ sg_callb = sgrp_callback[sg];
+ if (sg_callb == NULL) {
+ upwr_callb user_callb = user_callback[sg];
+ /* no service group callback; call the user callback if any */
+ if (user_callb == NULL) {
+ goto done; /* no user callback */
+ }
+
+ /* make the user callback */
+ user_callb(sg, rxmsg.hdr.function,
+ (upwr_resp_t)rxmsg.hdr.errcode,
+ (size == 2U) ? rxmsg.word2 : rxmsg.hdr.ret);
+ goto done;
+ }
+
+ /*
+ * finally make the group callback. don't uninstall the group
+ * callback, it is permanent.
+ */
+ sg_callb();
+done:
+ if (rxmsg.hdr.errcode == UPWR_RESP_SHUTDOWN) { /* shutdown error: */
+ /*
+ * change the API state automatically. so new requests
+ * are rejected by the API immediately
+ */
+ api_state = UPWR_API_INITLZED;
+ }
+}
+
+/**
+ * upwr_srv_req() - sends a service request message.
+ * @sg: message service group.
+ * @msg: pointer to the message
+ * @size: message size in 32-bit words.
+ *
+ * The message is sent right away if possible, or gets pending to be sent later.
+ * If pending, the message is stored in sg_req_msg and will be sent when the
+ * MU transmission buffer is clear and there are no other pending messages
+ * from higher priority service groups.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void)
+ */
+static void upwr_srv_req(upwr_sg_t sg,
+ uint32_t *msg,
+ unsigned int size)
+{
+ int rc;
+
+ upwr_lock(1);
+ sg_busy |= (uint32_t)1U << sg;
+ upwr_lock(0);
+
+ rc = upwr_tx(msg, size, upwr_next_req);
+ if (rc < 0) {
+ /* queue full, make the transmission pending */
+ msg_copy((char *)&sg_req_msg[sg], (char *)msg, size);
+ sg_req_siz[sg] = size;
+
+ upwr_lock(1);
+ sg_tx_curr = sg;
+ sg_tx_pend |= (uint32_t)1U << sg;
+ upwr_lock(0);
+
+ return;
+ }
+}
+
+/**---------------------------------------------------------------
+ * INITIALIZATION, CONFIGURATION
+ *
+ * A reference uPower initialization sequence goes as follows:
+ *
+ * 1. host CPU calls upwr_init.
+ * 2. (optional) host checks the ROM version and SoC code calling upwr_vers(...)
+ * and optionally performs any configuration or workaround accordingly.
+ * 3. host CPU calls upwr_start to start the uPower services, passing a
+ * service option number.
+ * If no RAM code is loaded or it has no service options, the launch option
+ * number passed must be 0, which will start the services available in ROM.
+ * upwr_start also receives a pointer to a callback called by the API
+ * when the firmware is ready to receive service requests.
+ * The callback may be replaced by polling, calling upwr_req_status in a loop
+ * or upwr_poll_req_status; in this case the callback pointer may be NULL.
+ * A host may call upwr_start even if the services were already started by
+ * any host: if the launch option is the same, the response will be ok,
+ * but will indicate error if the services were already started with a
+ * different launch option.
+ * 4. host waits for the callback to be called, or for the polling to finish;
+ * if no error is returned, it can start making service calls using the API.
+ *
+ * Variations on that reference sequence are possible:
+ * - the uPower services can be started using the ROM code only, which includes
+ * the basic Power Management services, among others, with launch option
+ * number = 0.
+ * The code RAM can be loaded while these services are running and,
+ * when the loading is done, the services can be re-started with these 2
+ * requests executed in order: upwr_xcp_shutdown and upwr_start,
+ * using the newly loaded RAM code (launch option > 0).
+ *
+ * NOTE: the initialization call upwr_init is not effective and
+ * returns error when called after the uPower services are started.
+ */
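+
+/*
+ * Minimal sketch of the reference sequence above, assuming an SoC-provided
+ * isr_install()/mu_lock() pair and MU_BASE_ADDR; the callback argument types
+ * are shown as uint32_t (see the upwr_rdy_callb typedef for the exact
+ * prototype):
+ *
+ *   static void services_ready(uint32_t vmajor, uint32_t vminor, uint32_t vfixes)
+ *   {
+ *           if ((vmajor | vminor | vfixes) == 0U) {
+ *                   // launch failed
+ *           }
+ *   }
+ *
+ *   (void)upwr_init(APD_DOMAIN, (struct MU_t *)MU_BASE_ADDR, NULL, NULL,
+ *                   isr_install, mu_lock);
+ *   (void)upwr_start(0U, services_ready);  // launchopt 0: ROM services
+ *
+ * isr_install, mu_lock and MU_BASE_ADDR are placeholders for platform code.
+ */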
+
+/**
+ * upwr_start_callb() - internal callback for the Rx message from uPower
+ * that indicates the firmware is ready to receive the start commands.
+ * It calls the user callbacks registered in the upwr_start_boot and upwr_start
+ * call.
+ */
+void upwr_start_callb(void)
+{
+ switch (api_state) {
+ case UPWR_API_START_WAIT: {
+ upwr_rdy_callb start_callb = (upwr_rdy_callb)user_callback[UPWR_SG_EXCEPT];
+ upwr_ready_msg *msg = (upwr_ready_msg *)&sg_rsp_msg[UPWR_SG_EXCEPT];
+
+ fw_ram_version.soc_id = fw_rom_version.soc_id;
+ fw_ram_version.vmajor = msg->args.vmajor;
+ fw_ram_version.vminor = msg->args.vminor;
+ fw_ram_version.vfixes = msg->args.vfixes;
+
+ /*
+ * vmajor == vminor == vfixes == 0 indicates start error
+ * in this case, go back to the INITLZED state
+ */
+ if ((fw_ram_version.vmajor != 0U) ||
+ (fw_ram_version.vminor != 0U) ||
+ (fw_ram_version.vfixes != 0U)) {
+ api_state = UPWR_API_READY;
+
+ /*
+ * initialization is over:
+ * uninstall the user callback just in case
+ */
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, NULL);
+
+ if (fw_launch_option == 0U) {
+ /*
+ * launched ROM firmware:
+ * RAM fw versions must be all 0s
+ */
+ fw_ram_version.vmajor = 0U;
+ fw_ram_version.vminor = 0U;
+ fw_ram_version.vfixes = 0U;
+ }
+ } else {
+ api_state = UPWR_API_INITLZED;
+ }
+
+ start_callb(msg->args.vmajor, msg->args.vminor, msg->args.vfixes);
+ }
+ break;
+
+ case UPWR_API_SHUTDOWN_WAIT: {
+ upwr_callb user_callb = (upwr_callb)user_callback[UPWR_SG_EXCEPT];
+ upwr_shutdown_msg *msg = (upwr_shutdown_msg *)&sg_rsp_msg[UPWR_SG_EXCEPT];
+
+ if ((upwr_resp_t)msg->hdr.errcode == UPWR_RESP_OK) {
+ api_state = UPWR_API_INITLZED;
+ }
+
+ if (user_callb != NULL) {
+ user_callb(UPWR_SG_EXCEPT, UPWR_XCP_SHUTDOWN,
+ (upwr_resp_t)msg->hdr.errcode, 0U);
+ }
+ }
+ break;
+
+ case UPWR_API_READY:
+ {
+ upwr_callb user_callb = (upwr_callb)user_callback[UPWR_SG_EXCEPT];
+ upwr_up_max_msg *msg = (upwr_up_max_msg *)&sg_rsp_msg[UPWR_SG_EXCEPT];
+
+ if (user_callb != NULL) {
+ user_callb(UPWR_SG_EXCEPT, msg->hdr.function,
+ (upwr_resp_t)msg->hdr.errcode,
+ (int)((sg_rsp_siz[UPWR_SG_EXCEPT] == 2U) ?
+ msg->word2 : msg->hdr.ret));
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * upwr_init() - API initialization; must be the first API call after reset.
+ * @domain: SoC-dependent CPU domain id; identifier used by the firmware in
+ * many services. Defined by SoC-dependent type soc_domain_t found in
+ * upower_soc_defs.h.
+ * @muptr: pointer to the MU instance.
+ * @mallocptr: pointer to the memory allocation function
+ * @physaddrptr: pointer to the function to convert pointers to
+ * physical addresses. If NULL, no conversion is made (pointer=physical address)
+ * @isrinstptr: pointer to the function to install the uPower ISR callbacks;
+ * the function receives the pointers to the MU tx/rx and Exception ISRs
+ * callbacks, which must be called from the actual system ISRs.
+ * The function pointed by isrinstptr must also enable the interrupt at the
+ * core/interrupt controller, but must not enable the interrupt at the MU IP.
+ * The system ISRs are responsible for dealing with the interrupt controller,
+ * performing any other context save/restore, and any other housekeeping.
+ * @lockptr: pointer to a function that prevents MU interrupts (if argument=1)
+ * or allows it (if argument=0). The API calls this function to make small
+ * specific code portions thread safe. Only MU interrupts must be avoided,
+ * the code may be suspended for other reasons.
+ * If no MU interrupts can happen during the execution of an API call or
+ * callback, even if enabled, for some other reason (e.g. interrupt priority),
+ * then this argument may be NULL.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if failed to allocate memory, or use some other resource.
+ * -2 if any argument is invalid.
+ * -3 if failed to send the ping message.
+ * -4 if failed to receive the initialization message, or was invalid
+ */
+int upwr_init(soc_domain_t domain, struct MU_t *muptr,
+ const upwr_malloc_ptr_t mallocptr,
+ const upwr_phyadr_ptr_t phyadrptr,
+ const upwr_inst_isr_ptr_t isrinstptr,
+ const upwr_lock_ptr_t lockptr)
+{
+ uint32_t j;
+
+ upwr_sg_t sg; /* service group number */
+ unsigned int size;
+ unsigned long dom_buffer_base = (domain == RTD_DOMAIN) ? UPWR_API_BUFFER_BASE :
+ ((UPWR_API_BUFFER_ENDPLUS + UPWR_API_BUFFER_BASE) / 2U);
+
+ upwr_init_msg *msg = (upwr_init_msg *)&sg_rsp_msg[UPWR_SG_EXCEPT];
+
+ mu = muptr;
+ /*
+ * Disable tx and rx interrupts in case not called
+ * 1st time after reset
+ */
+ mu->TCR.R = mu->RCR.R = 0U;
+
+ os_malloc = mallocptr;
+ os_ptr2phy = (phyadrptr == (upwr_phyadr_ptr_t)NULL) ? ptr2phys : phyadrptr;
+
+ os_lock = lockptr;
+ api_state = UPWR_API_INIT_WAIT;
+ sg_busy = 0UL;
+ pwr_domain = domain;
+
+ /* initialize the versions, in case they are polled */
+ fw_rom_version.soc_id = 0U;
+ fw_rom_version.vmajor = 0U;
+ fw_rom_version.vminor = 0U;
+ fw_rom_version.vfixes = 0U;
+
+ fw_ram_version.soc_id = 0U;
+ fw_ram_version.vmajor = 0U;
+ fw_ram_version.vminor = 0U;
+ fw_ram_version.vfixes = 0U;
+
+ mu_tx_pend = (uint32_t)0U;
+ sg_tx_pend = (uint32_t)0U;
+
+ sg_tx_curr = UPWR_SG_COUNT; /* means none here */
+
+ sh_buffer[UPWR_SG_EXCEPT] = (void *)(unsigned long)dom_buffer_base;
+ sh_buffer[UPWR_SG_PWRMGMT] = (void *)(unsigned long)(dom_buffer_base +
+ MAX_SG_EXCEPT_MEM_SIZE);
+ sh_buffer[UPWR_SG_DELAYM] = NULL;
+ sh_buffer[UPWR_SG_VOLTM] = (void *)(unsigned long)(dom_buffer_base +
+ MAX_SG_EXCEPT_MEM_SIZE + MAX_SG_PWRMGMT_MEM_SIZE);
+ sh_buffer[UPWR_SG_CURRM] = NULL;
+ sh_buffer[UPWR_SG_TEMPM] = NULL;
+ sh_buffer[UPWR_SG_DIAG] = NULL;
+
+	/* (no buffers for service groups other than xcp, pwm and voltm for now) */
+ for (j = 0; j < UPWR_SG_COUNT; j++) {
+ user_callback[j] = NULL;
+ /* service group Exception gets the initialization callbacks */
+ sgrp_callback[j] = (j == UPWR_SG_EXCEPT) ? upwr_start_callb : NULL;
+ /* response messages with an initial consistent content */
+ sg_rsp_msg[j].hdr.errcode = UPWR_RESP_SHUTDOWN;
+ }
+
+	/* init message already received, assume tasks are running on uPower */
+ if (mu->FSR.B.F0 != 0U) {
+ /* send a ping message down to get the ROM version back */
+ upwr_xcp_ping_msg ping_msg = {0};
+
+ ping_msg.hdr.domain = pwr_domain;
+ ping_msg.hdr.srvgrp = UPWR_SG_EXCEPT;
+ ping_msg.hdr.function = UPWR_XCP_PING;
+
+ if (mu->RSR.B.RF0 != 0U) { /* first clean any Rx message left over */
+ (void)upwr_rx((char *)msg, &size);
+ }
+
+ /* wait any TX left over to be sent */
+ while (mu->TSR.R != UPWR_MU_TSR_EMPTY) {
+ }
+
+ /*
+ * now send the ping message;
+ * do not use upwr_tx, which needs API initialized;
+ * just write to the MU TR register(s)
+ */
+ mu->FCR.B.F0 = 1U; /* flag urgency status */
+ upwr_copy2tr(mu, (uint32_t *)&ping_msg, sizeof(ping_msg) / 4U);
+ }
+
+ do {
+ /*
+ * poll for the MU Rx status: wait for an init message, either
+ * 1st sent from uPower after reset or as a response to a ping
+ */
+ while (mu->RSR.B.RF0 == 0U) {
+ }
+
+ /* urgency status off, in case it was set */
+ mu->FCR.B.F0 = 0U;
+
+ if (upwr_rx((char *)msg, &size) < 0) {
+ return -4;
+ }
+
+ if (size != (sizeof(upwr_init_msg) / 4U)) {
+ if (mu->FSR.B.F0 != 0U) {
+ continue; /* discard left over msg */
+ } else {
+ return -4;
+ }
+ }
+
+ sg = (upwr_sg_t)msg->hdr.srvgrp;
+ if (sg != UPWR_SG_EXCEPT) {
+ if (mu->FSR.B.F0 != 0U) {
+ continue; /* discard left over msg */
+ } else {
+ return -4;
+ }
+ }
+
+ if ((upwr_xcp_f_t)msg->hdr.function != UPWR_XCP_INIT) {
+ if (mu->FSR.B.F0 != 0U) {
+ continue; /* discard left over msg */
+ } else {
+ return -4;
+ }
+ }
+
+ break;
+ } while (true);
+
+ fw_rom_version.soc_id = msg->args.soc;
+ fw_rom_version.vmajor = msg->args.vmajor;
+ fw_rom_version.vminor = msg->args.vminor;
+ fw_rom_version.vfixes = msg->args.vfixes;
+
+ if (upwr_rx_callback(upwr_mu_int_callback) < 0) {
+ /* catastrophic error, but is it possible to happen? */
+ return -1;
+ }
+
+ mu_tx_callb = NULL; /* assigned on upwr_tx */
+
+ /* install the ISRs and enable the interrupts */
+ isrinstptr(upwr_txrx_isr, upwr_exp_isr);
+
+ /* enable only RR[0] receive interrupt */
+ mu->RCR.R = 1U;
+
+ api_state = UPWR_API_INITLZED;
+
+ return 0;
+}
+
+/**
+ * upwr_start() - Starts the uPower services.
+ * @launchopt: a number to select between multiple launch options,
+ * that may define, among other things, which services will be started,
+ * or which services implementations, features etc.
+ * launchopt = 0 selects a subset of services implemented in ROM;
+ * any other number selects service sets implemented in RAM, launched
+ * by the firmware function ram_launch; if an invalid launchopt value is passed,
+ * no services are started, and the callback returns error (see below).
+ * @rdycallb: pointer to the callback to be called when the uPower is ready
+ * to receive service requests. NULL if no callback needed.
+ * The callback receives as arguments the RAM firmware version numbers.
+ * If all 3 numbers (vmajor, vminor, vfixes) are 0, that means the
+ * service launching failed.
+ * Firmware version numbers will be the same as ROM if launchopt = 0,
+ * selecting the ROM services.
+ *
+ * upwr_start can be called by any domain even if the services are already
+ * started: it has no effect, returning success, if the launch option is the
+ * same as the one that actually started the service, and returns error if
+ * called with a different option.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if a resource failed,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+int upwr_start(uint32_t launchopt, const upwr_rdy_callb rdycallb)
+{
+ upwr_start_msg txmsg = {0};
+
+ if (api_state != UPWR_API_INITLZED) {
+ return -3;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, (upwr_callb)rdycallb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_START);
+
+ txmsg.hdr.arg = fw_launch_option = launchopt;
+
+ if (upwr_tx((uint32_t *)&txmsg, sizeof(txmsg) / 4U, NULL) < 0) {
+ /* catastrophic error, but is it possible to happen? */
+ return -1;
+ }
+
+ api_state = UPWR_API_START_WAIT;
+
+ return 0;
+}
+
+/**---------------------------------------------------------------
+ * EXCEPTION SERVICE GROUP
+ */
+
+/**
+ * upwr_xcp_config() - Applies general uPower configurations.
+ * @config: pointer to the uPower SoC-dependent configuration struct
+ * upwr_xcp_config_t defined in upower_soc_defs.h. NULL may be passed, meaning
+ * a request to read the configuration, in which case it appears in the callback
+ * argument ret, or can be pointed by argument retptr in the upwr_req_status and
+ * upwr_poll_req_status calls, casted to upwr_xcp_config_t.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the configuration, or NULL if no callback needed (polling used instead).
+ *
+ * Some configurations are targeted for a specific domain (see the struct
+ * upwr_xcp_config_t definition in upower_soc_defs.h); this call has implicit
+ * domain target (the same domain from which is called).
+ *
+ * The return value is always the current configuration value, either in a
+ * read-only request (config = NULL) or after setting a new configuration
+ * (non-NULL config).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_config(const upwr_xcp_config_t *config, const upwr_callb callb)
+{
+ upwr_xcp_config_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ if (config == NULL) {
+ txmsg.hdr.arg = 1U; /* 1= read, txmsg.word2 ignored */
+ } else {
+		txmsg.hdr.arg = 0U; /* 0= write */
+ txmsg.word2 = config->R;
+ }
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_CONFIG);
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_sw_alarm() - Makes uPower issue an alarm interrupt to given domain.
+ * @domain: identifier of the domain to alarm. Defined by SoC-dependent type
+ * soc_domain_t found in upower_soc_defs.h.
+ * @code: alarm code. Defined by SoC-dependent type upwr_alarm_t found in
+ * upower_soc_defs.h.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the alarm, or NULL if no callback needed (polling used instead).
+ *
+ * The function requests the uPower to issue an alarm of the given code as if
+ * it had originated internally. This service is useful mainly to test the
+ * system response to such alarms, or to make the system handle a similar alarm
+ * situation detected externally to uPower.
+ *
+ * The system ISR/code handling the alarm may retrieve the alarm code by calling
+ * the auxiliary function upwr_alarm_code.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_sw_alarm(soc_domain_t domain,
+ upwr_alarm_t code,
+ const upwr_callb callb)
+{
+ upwr_xcp_swalarm_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SW_ALARM);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)code;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_set_ddr_retention() - M33/A35 can use this API to set/clear ddr retention
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets DDR retention, false clears DDR retention.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_ddr_retention(soc_domain_t domain,
+ uint32_t enable,
+ const upwr_callb callb)
+{
+ upwr_xcp_ddr_retn_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SET_DDR_RETN);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)enable;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_set_mipi_dsi_ena() - M33/A35 can use this API to set/clear mipi dsi ena
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets the MIPI DSI enable flag, false clears it.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_mipi_dsi_ena(soc_domain_t domain,
+ uint32_t enable,
+ const upwr_callb callb)
+{
+ upwr_xcp_set_mipi_dsi_ena_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SET_MIPI_DSI_ENA);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)enable;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_get_mipi_dsi_ena() - M33/A35 can use this API to get mipi dsi ena status
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_get_mipi_dsi_ena(soc_domain_t domain, const upwr_callb callb)
+{
+ upwr_xcp_get_mipi_dsi_ena_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_GET_MIPI_DSI_ENA);
+ txmsg.hdr.domain = (uint32_t)domain;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_set_osc_mode() - M33/A35 can use this API to set uPower OSC mode
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @osc_mode: 0 means low frequency, non-zero means high frequency.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_osc_mode(soc_domain_t domain,
+ uint32_t osc_mode,
+ const upwr_callb callb)
+{
+ upwr_xcp_set_osc_mode_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SET_OSC_MODE);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)osc_mode;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_set_rtd_use_ddr() - M33 calls this API to inform uPower whether RTD is using DDR
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @is_use_ddr: non-zero (true) means that RTD is using DDR; 0 (false) means
+ * that RTD is not using DDR.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_rtd_use_ddr(soc_domain_t domain,
+ uint32_t is_use_ddr,
+ const upwr_callb callb)
+{
+ upwr_xcp_rtd_use_ddr_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SET_RTD_USE_DDR);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)is_use_ddr;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_set_rtd_apd_llwu() - M33/A35 can use this API to set/clear the RTD/APD LLWU
+ * @domain: selects which domain's LLWU (RTD_DOMAIN or APD_DOMAIN) is set or cleared.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets the LLWU of the given domain, false clears it.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_rtd_apd_llwu(soc_domain_t domain,
+ uint32_t enable,
+ const upwr_callb callb)
+{
+ upwr_xcp_rtd_apd_llwu_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SET_RTD_APD_LLWU);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)enable;
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_shutdown() - Shuts down all uPower services and power mode tasks.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the shutdown, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * At the callback the uPower/API is back to initialization/start-up phase,
+ * so service request calls return error.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_shutdown(const upwr_callb callb)
+{
+ upwr_xcp_shutdown_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_SHUTDOWN);
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ api_state = UPWR_API_SHUTDOWN_WAIT;
+
+ return 0;
+}
+
+/**
+ * upwr_xcp_i2c_access() - Performs an access through the uPower I2C interface.
+ * @addr: I2C slave address, up to 10 bits.
+ * @data_size: determines the access direction and data size in bytes, up to 4;
+ * negative data_size determines a read access with size -data_size;
+ * positive data_size determines a write access with size data_size;
+ * data_size=0 is invalid, making the service return error UPWR_RESP_BAD_REQ.
+ * @subaddr_size: size of the sub-address in bytes, up to 4; if subaddr_size=0,
+ * no subaddress is used.
+ * @subaddr: sub-address, only used if subaddr_size > 0.
+ * @wdata: write data, up to 4 bytes; ignored if data_size < 0 (read)
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the access, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * The service performs a read (data_size < 0) or a write (data_size > 0) of
+ * up to 4 bytes on the uPower I2C interface. The data read from I2C comes via
+ * the callback argument ret, or is written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * Sub-addressing is supported, with sub-address size determined by the argument
+ * subaddr_size, up to 4 bytes. Sub-addressing is not used if subaddr_size=0.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_i2c_access(uint16_t addr,
+ int8_t data_size,
+ uint8_t subaddr_size,
+ uint32_t subaddr,
+ uint32_t wdata,
+ const upwr_callb callb)
+{
+ unsigned long ptrval = (unsigned long)sh_buffer[UPWR_SG_EXCEPT];
+ upwr_i2c_access *i2c_acc_ptr = (upwr_i2c_access *)ptrval;
+ upwr_pwm_pmiccfg_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_EXCEPT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_EXCEPT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_EXCEPT, UPWR_XCP_I2C);
+
+ i2c_acc_ptr->addr = addr;
+ i2c_acc_ptr->subaddr = subaddr;
+ i2c_acc_ptr->subaddr_size = subaddr_size;
+ i2c_acc_ptr->data = wdata;
+ i2c_acc_ptr->data_size = data_size;
+
+ txmsg.ptr = upwr_ptr2offset(ptrval,
+ UPWR_SG_EXCEPT,
+ (size_t)sizeof(upwr_i2c_access),
+ 0U,
+ i2c_acc_ptr);
+
+ upwr_srv_req(UPWR_SG_EXCEPT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
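+
+/*
+ * Usage sketch (hypothetical PMIC register addresses, polling not shown):
+ * read one byte from I2C slave 0x32 at sub-address 0x07, then write 0x55
+ * back to the same register:
+ *
+ *   (void)upwr_xcp_i2c_access(0x32U, -1, 1U, 0x07U, 0U, my_callb);    // read
+ *   (void)upwr_xcp_i2c_access(0x32U, 1, 1U, 0x07U, 0x55U, my_callb);  // write
+ *
+ * The data read comes back in the callback argument ret, or via
+ * upwr_req_status/upwr_poll_req_status when my_callb is NULL.
+ */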
+
+/**---------------------------------------------------------------
+ * VOLTAGE MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_vtm_pmic_cold_reset() - requests a cold reset of the PMIC.
+ * The PMIC will power cycle all the regulators.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to cold reset the pmic.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_pmic_cold_reset(upwr_callb callb)
+{
+ upwr_volt_pmic_cold_reset_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_PMIC_COLD_RESET);
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_vtm_set_pmic_mode() - requests uPower to set the PMIC mode
+ * @pmic_mode: the target mode to be set
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to set pmic mode
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_set_pmic_mode(uint32_t pmic_mode, upwr_callb callb)
+{
+ upwr_volt_pmic_set_mode_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_SET_PMIC_MODE);
+
+ txmsg.hdr.arg = pmic_mode;
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_vtm_chng_pmic_voltage() - Changes the voltage of a given rail.
+ * @rail: pmic rail id.
+ * @volt: the target voltage of the given rail, accurate to uV
+ * Passing a volt value of 0 powers off this rail.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to change the voltage of the given rail.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_chng_pmic_voltage(uint32_t rail, uint32_t volt, upwr_callb callb)
+{
+ upwr_volt_pmic_set_volt_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_CHNG_PMIC_RAIL_VOLT);
+
+ txmsg.args.rail = rail;
+
+ txmsg.args.volt = (volt + PMIC_VOLTAGE_MIN_STEP - 1U) / PMIC_VOLTAGE_MIN_STEP;
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
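+
+/*
+ * Note on the volt conversion above: the requested micro-volt value is
+ * rounded up to the next PMIC_VOLTAGE_MIN_STEP multiple. Assuming a
+ * hypothetical step of 12500 uV, a request of 1000000 uV becomes
+ * (1000000 + 12499) / 12500 = 80 steps (exactly 1.0 V), while 1000001 uV
+ * becomes 81 steps and is rounded up to 1.0125 V.
+ */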
+
+/**
+ * upwr_vtm_get_pmic_voltage() - Get the voltage of a given rail.
+ * @rail: pmic rail id.
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to get the voltage of the given rail.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The voltage data is read from uPower via the callback argument ret, or
+ * written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_get_pmic_voltage(uint32_t rail, upwr_callb callb)
+{
+ upwr_volt_pmic_get_volt_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_GET_PMIC_RAIL_VOLT);
+
+ txmsg.args.rail = rail;
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_vtm_power_measure() - request uPower to measure power consumption
+ * @ssel: This field determines which power switches will have their currents
+ * sampled and accounted for in a current/power measurement.
+ * Supported values: 0~7
+ *
+ * SSEL bit # Power Switch
+ * 0 M33 core complex/platform/peripherals
+ * 1 Fusion Core and Peripherals
+ * 2 A35[0] core complex
+ * 3 A35[1] core complex
+ * 4 3DGPU
+ * 5 HiFi4
+ * 6 DDR Controller (PHY and PLL NOT included)
+ * 7 PXP, EPDC
+ *
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to measure power consumption
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The power consumption data is read from uPower via the callback argument
+ * ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Accurate to uA
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_power_measure(uint32_t ssel, upwr_callb callb)
+{
+ upwr_volt_pmeter_meas_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_PMETER_MEAS);
+
+ txmsg.hdr.arg = ssel;
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_vtm_vmeter_measure() - request uPower to measure voltage
+ * @vdetsel: Voltage Detector Selector, supported values 0~3
+ *           00b - RTD sense point
+ *           01b - LDO output
+ *           10b - APD domain sense point
+ *           11b - AVD domain sense point
+ *           Refer to upower_defs.h
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to use vmeter to measure voltage
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The voltage data is read from uPower via the callback argument ret, or
+ * written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Refer to RM COREREGVL (Core Regulator Voltage Level)
+ * uPower returns VDETLVL to the user; the user can calculate the real voltage:
+ *
+ * 0b000000 (0x00) - 0.595833V
+ * 0b100110 (0x26) - 1.007498V
+ * <value>         - 0.595833V + <value> x 10.8333mV
+ * 0b110010 (0x32) - 1.138V
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_vmeter_measure(uint32_t vdetsel, upwr_callb callb)
+{
+ upwr_volt_vmeter_meas_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_VMETER_MEAS);
+
+ txmsg.hdr.arg = vdetsel;
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
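+
+/*
+ * Worked example for the VDETLVL formula documented above: a returned value
+ * of 0b100110 (0x26 = 38 decimal) gives
+ * 0.595833V + 38 x 10.8333mV = 0.595833V + 0.411665V ~= 1.007498V,
+ * which matches the reference point listed in the comment.
+ */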
+
+/**
+ * upwr_vtm_pmic_config() - Configures the SoC PMIC (Power Management IC).
+ * @config: pointer to a PMIC-dependent struct defining the PMIC configuration.
+ * @size: size of the struct pointed by config, in bytes.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change/define the PMIC configuration.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_pmic_config(const void *config, uint32_t size, upwr_callb callb)
+{
+ upwr_pwm_pmiccfg_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_VOLTM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_VOLTM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_VOLTM, UPWR_VTM_PMIC_CONFIG);
+
+ ptrval = (unsigned long)os_ptr2phy(config);
+ if (ptrval == 0UL) {
+ return -2; /* pointer conversion failed */
+ }
+
+ txmsg.ptr = upwr_ptr2offset(ptrval,
+ UPWR_SG_VOLTM,
+ (size_t)size,
+ 0U,
+ config);
+
+ upwr_srv_req(UPWR_SG_VOLTM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**---------------------------------------------------------------
+ * TEMPERATURE MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_tpm_get_temperature() - request uPower to get temperature of one temperature sensor
+ * @sensor_id: temperature sensor ID, supported range 0~2
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to measure the temperature.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_TEMPM as the service group argument.
+ *
+ * The temperature data is read from uPower via the callback argument ret, or
+ * written to the variable pointed to by retptr if polling is used (calls to
+ * upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * uPower returns TSEL to the caller (M33 or A35); the caller calculates the
+ * real temperature as:
+ *   Tsh = 0.000002673049*TSEL[7:0]^3 + 0.0003734262*TSEL[7:0]^2
+ *         + 0.4487042*TSEL[7:0] - 46.98694
+ *
+ * The uPower firmware must support concurrent requests from the M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_tpm_get_temperature(uint32_t sensor_id, upwr_callb callb)
+{
+ upwr_temp_get_cur_temp_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_TEMPM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_TEMPM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_TEMPM, UPWR_TEMP_GET_CUR_TEMP);
+
+ txmsg.args.sensor_id = sensor_id;
+
+ upwr_srv_req(UPWR_SG_TEMPM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
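
A minimal polling sketch, illustrative only and not part of this patch: it reads sensor 0 and applies the Tsh formula above; the floating-point conversion and the error sentinel are shown purely for clarity.

#include "upower_api.h"

/* One-shot temperature read from sensor 0, converted to degrees Celsius */
static int read_temp_celsius(void)
{
	upwr_resp_t err;
	int tsel;

	if (upwr_tpm_get_temperature(0U, NULL) != 0) {
		return -1000;	/* arbitrary error sentinel */
	}

	if (upwr_poll_req_status(UPWR_SG_TEMPM, NULL, &err, &tsel,
				 10000U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1000;
	}

	/* Tsh = 0.000002673049*TSEL^3 + 0.0003734262*TSEL^2 + 0.4487042*TSEL - 46.98694 */
	return (int)((0.000002673049 * tsel * tsel * tsel) +
		     (0.0003734262 * tsel * tsel) +
		     (0.4487042 * tsel) - 46.98694);
}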
+
+/**---------------------------------------------------------------
+ * DELAY MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_dlm_get_delay_margin() - request uPower to get delay margin
+ * @path: the critical path to be monitored
+ * @index: which delay meter to use
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to get the delay margin.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The delay margin data is read from uPower via the callback argument ret, or
+ * written to the variable pointed to by retptr if polling is used (calls to
+ * upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware must support concurrent requests from the M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_get_delay_margin(uint32_t path, uint32_t index, upwr_callb callb)
+{
+ upwr_dmeter_get_delay_margin_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_DELAYM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_DELAYM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_DELAYM, UPWR_DMETER_GET_DELAY_MARGIN);
+
+ txmsg.args.path = path;
+ txmsg.args.index = index;
+
+ upwr_srv_req(UPWR_SG_DELAYM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
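
An illustrative polling sketch (not part of this patch): the critical path and delay-meter indices are chosen by the caller, and the poll budget is arbitrary.

#include <stdint.h>
#include "upower_api.h"

/* Return the current delay margin of one critical path, or -1 on failure */
static int read_delay_margin(uint32_t path, uint32_t meter)
{
	upwr_resp_t err;
	int margin;

	if (upwr_dlm_get_delay_margin(path, meter, NULL) != 0) {
		return -1;
	}

	if (upwr_poll_req_status(UPWR_SG_DELAYM, NULL, &err, &margin,
				 10000U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return margin;
}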
+
+/**
+ * upwr_dlm_set_delay_margin() - request uPower to set delay margin
+ * @path: the critical path to be monitored
+ * @index: which delay meter to use
+ * @delay_margin: the delay margin value to set
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to set the delay margin.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The result for the corresponding critical path (failed or not) is read from
+ * uPower via the callback argument ret, or written to the variable pointed to
+ * by retptr if polling is used (calls to upwr_req_status or
+ * upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware must support concurrent requests from the M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_set_delay_margin(uint32_t path, uint32_t index, uint32_t delay_margin,
+ upwr_callb callb)
+{
+ upwr_dmeter_set_delay_margin_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_DELAYM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_DELAYM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_DELAYM, UPWR_DMETER_SET_DELAY_MARGIN);
+
+ txmsg.args.path = path;
+ txmsg.args.index = index;
+ txmsg.args.dm = delay_margin;
+
+ upwr_srv_req(UPWR_SG_DELAYM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_dlm_process_monitor() - request uPower to run the process monitor
+ * @chain_sel: chain cell type selection; selects the chain used for the clock
+ *             signal generation. Two chain cell types are supported (0~1):
+ *             0b - P4 type delay cells selected
+ *             1b - P16 type delay cells selected
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to run the process monitor.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The process monitor result (failed or not) is read from uPower via the
+ * callback argument ret, or written to the variable pointed to by retptr if
+ * polling is used (calls to upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware must support concurrent requests from the M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_process_monitor(uint32_t chain_sel, upwr_callb callb)
+{
+ upwr_pmon_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_DELAYM)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_DELAYM, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_DELAYM, UPWR_PMON_REQ);
+
+ txmsg.args.chain_sel = chain_sel;
+
+ upwr_srv_req(UPWR_SG_DELAYM, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**---------------------------------------------------------------
+ * POWER MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_pwm_dom_power_on() - Commands uPower to power on the platform of
+ * another domain (not necessarily its core(s)); core resets are released only
+ * if requested via boot_start.
+ * @domain: identifier of the domain to power on. Defined by SoC-dependent type
+ * soc_domain_t found in upower_soc_defs.h.
+ * @boot_start: must be 1 to start the domain core(s) boot(s), releasing
+ * its (their) resets, or 0 otherwise.
+ * @pwroncallb: pointer to the callback to be called when the uPower has
+ * finished the power on procedure, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_dom_power_on(soc_domain_t domain,
+ int boot_start,
+ const upwr_callb pwroncallb)
+{
+ upwr_pwm_dom_pwron_msg txmsg = {0};
+
+ if (pwr_domain == domain) {
+ return -2;
+ }
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, (upwr_callb)pwroncallb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_DOM_PWRON);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = (uint32_t)boot_start;
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
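
A hedged usage sketch (illustrative only, not part of this patch): the domain identifier is SoC-dependent (soc_domain_t from upower_soc_defs.h) and is supplied by the caller; an attempts value of 0 makes the poll wait indefinitely.

#include "upower_api.h"

/* Power on another domain and release its core reset(s), waiting for the ack */
static int power_on_other_domain(soc_domain_t other)
{
	upwr_resp_t err;

	/* boot_start = 1: also release the core reset(s) of that domain */
	if (upwr_pwm_dom_power_on(other, 1, NULL) != 0) {
		return -1;
	}

	/* attempts = 0: wait for the uPower response indefinitely */
	if (upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, NULL,
				 0U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return 0;
}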
+
+/**
+ * upwr_pwm_boot_start() - Commands uPower to release the reset of other CPU(s),
+ * starting their boots.
+ * @domain: identifier of the domain to release the reset. Defined by
+ * SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @bootcallb: pointer to the callback to be called when the uPower has finished
+ * the boot start procedure, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * The callback calling doesn't mean the CPUs boots have finished:
+ * it only indicates that uPower released the CPUs resets, and can receive
+ * other power management service group requests.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_boot_start(soc_domain_t domain, const upwr_callb bootcallb)
+{
+ upwr_pwm_boot_start_msg txmsg = {0};
+
+ if (pwr_domain == domain) {
+ return -2;
+ }
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, (upwr_callb)bootcallb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_BOOT);
+ txmsg.hdr.domain = (uint32_t)domain;
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_param() - Changes Power Management parameters.
+ * @param: pointer to a parameter structure upwr_pwm_param_t, SoC-dependent,
+ * defined in upwr_soc_defines.h. NULL may be passed, meaning
+ * a request to read the parameter set, in which case it appears in the callback
+ * argument ret, or can be retrieved via the argument retptr in the upwr_req_status
+ * and upwr_poll_req_status calls, cast to upwr_pwm_param_t.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The return value is always the current parameter set value, either in a
+ * read-only request (param = NULL) or after setting a new parameter
+ * (non-NULL param).
+ *
+ * Some parameters may be targeted for a specific domain (see the struct
+ * upwr_pwm_param_t definition in upower_soc_defs.h); this call has an implicit
+ * domain target (the same domain from which it is called).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_param(upwr_pwm_param_t *param, const upwr_callb callb)
+{
+ upwr_pwm_param_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_PARAM);
+
+ if (param == NULL) {
+ txmsg.hdr.arg = 1U; /* 1= read, txmsg.word2 ignored */
+ } else {
+		txmsg.hdr.arg = 0U; /* 0 = write */
+ txmsg.word2 = param->R; /* just 1 word, so that's ok */
+ }
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
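
As a sketch only (not part of this patch), a read-back of the current parameter set can be done by passing a NULL param and polling; the poll budget is arbitrary:

#include <stdint.h>
#include "upower_api.h"

/* Read the current power-management parameter word into *out; -1 on failure */
static int read_pwm_param_word(uint32_t *out)
{
	upwr_resp_t err;
	int val;

	if (upwr_pwm_param(NULL, NULL) != 0) {	/* NULL param: read-only request */
		return -1;
	}

	if (upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, &val,
				 10000U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	*out = (uint32_t)val;
	return 0;
}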
+
+/**
+ * upwr_pwm_chng_reg_voltage() - Changes the voltage at a given regulator.
+ * @reg: regulator id.
+ * @volt: voltage value; value unit is SoC-dependent, converted from mV by the
+ * macro UPWR_VTM_MILIV, or from micro-Volts by the macro UPWR_VTM_MICROV,
+ * both macros in upower_soc_defs.h
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to change the voltage of the given regulator.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_chng_reg_voltage(uint32_t reg, uint32_t volt, upwr_callb callb)
+{
+ upwr_pwm_volt_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_VOLT);
+
+ txmsg.args.reg = reg;
+ txmsg.args.volt = volt;
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
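
A minimal sketch, illustrative only and not part of this patch: the regulator id is a placeholder chosen by the caller, and the UPWR_VTM_MILIV conversion macro is the one referenced in the description above (defined in upower_soc_defs.h).

#include <stdint.h>
#include "upower_api.h"

/* Request 1.0 V on the given regulator and wait for the acknowledgement */
static int set_regulator_1v0(uint32_t reg_id)
{
	upwr_resp_t err;

	if (upwr_pwm_chng_reg_voltage(reg_id, UPWR_VTM_MILIV(1000U), NULL) != 0) {
		return -1;
	}

	if (upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, NULL,
				 10000U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return 0;
}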
+
+/**
+ * upwr_pwm_freq_setup() - Determines the next frequency target for a given
+ * domain and current frequency.
+ * @domain: identifier of the domain to change frequency. Defined by
+ * SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @rail: the pmic regulator number for the target domain.
+ * @stage: DVA adjust stage
+ * refer to upower_defs.h "DVA adjust stage"
+ * @target_freq: the target adjust frequency, accurate to MHz
+ *
+ * refer to upower_defs.h structure definition upwr_pwm_freq_msg
+ *
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The DVA algorithm is broken down into two phases.
+ *
+ * The first phase (gross adjust) uses a look-up table to get a safe operating
+ * voltage for the requested frequency.
+ * This voltage is guaranteed to work over process and temperature.
+ *
+ * The second phase is the fine adjust of the voltage. This phase is entered
+ * only when the new frequency requested by the application has already been
+ * set, as well as the voltage for that frequency.
+ * The first step of the fine adjust is to find the current margins of the
+ * monitored critical paths, or, in other words, how many delay cells are
+ * necessary to generate a setup-timing violation.
+ * The second step is to measure the temperature using the uPower Temperature
+ * Sensor module. This is accomplished by doing a binary search of the TSEL bit
+ * field in the Temperature Measurement Register (TMR); the search is repeated
+ * until the THIGH bit fields in the same register change value.
+ * There are 3 temperature sensors in 8ULP (APD, AVD, and RTD).
+ *
+ * The function informs uPower that the given domain frequency has changed or
+ * will change to the given value. uPower firmware will then adjust voltage and
+ * bias to cope with the new frequency (if decreasing) or prepare for it
+ * (if increasing). The function must be called after decreasing the frequency,
+ * and before increasing it. The actual increase in frequency must not occur
+ * before the service returns its response.
+ *
+ * To increase the clock frequency, call this API twice: first for the gross
+ * adjust stage, then for the fine adjust stage.
+ * To reduce the clock frequency, a single call with the full stage (gross and
+ * fine adjust combined) is enough.
+ *
+ * The request is executed if arguments are within range.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_freq_setup(soc_domain_t domain, uint32_t rail, uint32_t stage, uint32_t target_freq,
+ upwr_callb callb)
+{
+ upwr_pwm_freq_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_FREQ);
+
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.args.rail = rail;
+ txmsg.args.stage = stage;
+ txmsg.args.target_freq = target_freq;
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
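
One plausible ordering of the two-call frequency-increase sequence, as a hedged sketch only (not part of this patch): the gross/fine stage identifiers are assumed to come from the "DVA adjust stage" definitions in upower_defs.h and are passed in by the caller, and the point where the clock is actually reprogrammed is only indicated by a comment.

#include <stdint.h>
#include "upower_api.h"

/* Raise a domain clock: gross adjust, reprogram the clock, then fine adjust */
static int raise_domain_freq(soc_domain_t dom, uint32_t rail, uint32_t gross_stage,
			     uint32_t fine_stage, uint32_t freq_mhz)
{
	upwr_resp_t err;

	if (upwr_pwm_freq_setup(dom, rail, gross_stage, freq_mhz, NULL) != 0 ||
	    upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, NULL,
				 0U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	/* ... only now program the new, higher clock frequency ... */

	if (upwr_pwm_freq_setup(dom, rail, fine_stage, freq_mhz, NULL) != 0 ||
	    upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, NULL,
				 0U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return 0;
}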
+
+/**
+ * upwr_pwm_power_on() - Powers on (not off) one or more switches and ROM/RAMs.
+ * @swton: pointer to an array of words that tells which power switches to
+ * turn on. Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective switch must be turned on,
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no switch will be changed,
+ * unless a memory that it feeds must be turned on.
+ * WARNING: swton must not point to the first shared memory address.
+ * @memon: pointer to an array of words that tells which memories to turn on.
+ * Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective memory must be turned on, both array and
+ * periphery logic;
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no memory will be changed.
+ * WARNING: memon must not point to the first shared memory address.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn on the PMC and memory array/peripheral
+ * switches that control their power, as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate memory power state related to overall system state.
+ *
+ * If a memory is requested to turn on, but the power switch that feeds that
+ * memory is not, the power switch will be turned on anyway if the swton
+ * array is not provided (that is, if swton is NULL).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+
+int upwr_pwm_power_on(const uint32_t swton[],
+ const uint32_t memon[],
+ upwr_callb callb)
+{
+ upwr_pwm_pwron_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+ size_t stsize = 0U;
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_PWR_ON);
+
+ ptrval = (unsigned long)os_ptr2phy((void *)swton);
+ if (swton == NULL) {
+ txmsg.ptrs.ptr0 = 0; /* NULL pointer -> 0 offset */
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr0 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ (stsize = UPWR_PMC_SWT_WORDS * 4U),
+ 0U,
+ swton);
+ }
+
+ ptrval = (unsigned long)os_ptr2phy((void *)memon);
+ if (memon == NULL) {
+ txmsg.ptrs.ptr1 = 0; /* NULL pointer -> 0 offset */
+
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr1 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ UPWR_PMC_MEM_WORDS * 4U,
+ stsize,
+ memon);
+ }
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
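
A minimal sketch of the bitmask arrays, illustrative only and not part of this patch: the example turns on only the memory selected by bit 0 of the first word, leaves the power switches untouched (NULL swton), and keeps the buffer static so it stays valid and away from the first shared memory address warned about above.

#include <stdint.h>
#include "upower_api.h"

/* Turn on the memory selected by bit 0, changing no other memory or switch */
static int power_on_first_mem(void)
{
	static uint32_t memon[UPWR_PMC_MEM_WORDS];	/* other words stay 0 = unchanged */
	upwr_resp_t err;

	memon[0] = 1U << 0;	/* bit = 1: turn that memory array + periphery on */

	/* NULL swton: switches untouched unless needed to feed the memory */
	if (upwr_pwm_power_on(NULL, memon, NULL) != 0) {
		return -1;
	}

	if (upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, &err, NULL,
				 0U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return 0;
}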
+
+/**
+ * upwr_pwm_power_off() - Powers off (not on) one or more switches and ROM/RAMs.
+ * @swtoff: pointer to an array of words that tells which power switches to
+ * turn off. Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective switch must be turned off,
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no switch will be changed.
+ * WARNING: swtoff must not point to the first shared memory address.
+ * @memoff: pointer to an array of words that tells which memories to turn off.
+ * Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective memory must be turned off, both array and
+ * periphery logic;
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no memory will be changed,
+ * but notice it may be turned off if the switch that feeds it is powered off.
+ * WARNING: memoff must not point to the first shared memory address.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn off the PMC and memory array/peripheral
+ * switches that control their power, as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate memory power state related to overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_power_off(const uint32_t swtoff[],
+ const uint32_t memoff[],
+ upwr_callb callb)
+{
+ upwr_pwm_pwroff_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+ size_t stsize = 0;
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_PWR_OFF);
+
+ ptrval = (unsigned long)os_ptr2phy((void *)swtoff);
+ if (swtoff == NULL) {
+ txmsg.ptrs.ptr0 = 0; /* NULL pointer -> 0 offset */
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr0 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ (stsize = UPWR_PMC_SWT_WORDS * 4U),
+ 0U,
+ swtoff);
+ }
+
+ ptrval = (unsigned long)os_ptr2phy((void *)memoff);
+ if (memoff == NULL) {
+ txmsg.ptrs.ptr1 = 0; /* NULL pointer -> 0 offset */
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr1 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ UPWR_PMC_MEM_WORDS * 4U,
+ stsize,
+ memoff);
+ }
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_mem_retain() - Configures one or more memory power switches to
+ * retain their contents, keeping the power array on while their peripheral
+ * logic is turned off.
+ * @mem: pointer to an array of words that tells which memories to put in a
+ * retention state. Each word in the array has 1 bit for each memory.
+ * A bit=1 means the respective memory must be put in retention state,
+ * bit = 0 means it will stay unchanged (retention, fully on or off).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn off the memory peripheral and leave
+ * its array on, as specified above.
+ * The request is executed if arguments are within range.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_mem_retain(const uint32_t mem[], upwr_callb callb)
+{
+ upwr_pwm_retain_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_RETAIN);
+
+ ptrval = (unsigned long)os_ptr2phy((void *)mem);
+ if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ }
+
+ txmsg.ptr = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ UPWR_PMC_MEM_WORDS * 4U,
+ 0U,
+ mem);
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_chng_switch_mem() - Turns on/off power on one or more PMC switches
+ * and memories, including their array and peripheral logic.
+ * @swt: pointer to a list of PMC switches to be opened/closed.
+ * The list is structured as an array of struct upwr_switch_board_t
+ * (see upower_defs.h), each one containing a word for up to 32 switches,
+ * one per bit. A bit = 1 means switch closed, bit = 0 means switch open.
+ * struct upwr_switch_board_t also specifies a mask with 1 bit for each
+ * respective switch: mask bit = 1 means the open/close action is applied,
+ * mask bit = 0 means the switch stays unchanged.
+ * The pointer may be set to NULL, in which case no switch will be changed,
+ * unless a memory that it feeds must be turned on.
+ * WARNING: swt must not point to the first shared memory address.
+ * @mem: pointer to a list of switches to be turned on/off.
+ * The list is structured as an array of struct upwr_mem_switches_t
+ * (see upower_defs.h), each one containing 2 words for up to 32 switches,
+ * one per bit, one word for the RAM array power switch, other for the
+ * RAM peripheral logic power switch. A bit = 1 means switch closed,
+ * bit = 0 means switch open.
+ * struct upwr_mem_switches_t also specifies a mask with 1 bit for each
+ * respective switch: mask bit = 1 means the open/close action is applied,
+ * mask bit = 0 means the switch stays unchanged.
+ * The pointer may be set to NULL, in which case no memory switch will be
+ * changed, but notice it may be turned off if the switch that feeds it is
+ * powered off.
+ * WARNING: mem must not point to the first shared memory address.
+ * @callb: pointer to the callback called when the configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the PMC switches and/or memory power
+ * as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate switch combinations and overall system state.
+ *
+ * If a memory is requested to turn on, but the power switch that feeds that
+ * memory is not, the power switch will be turned on anyway, if the swt
+ * array is not provided (that is, if swt is NULL).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy.
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+
+int upwr_pwm_chng_switch_mem(const struct upwr_switch_board_t swt[],
+ const struct upwr_mem_switches_t mem[],
+ upwr_callb callb)
+{
+ upwr_pwm_switch_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+ size_t stsize = 0U;
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_SWITCH);
+
+ ptrval = (unsigned long)os_ptr2phy((void *)swt);
+ if (swt == NULL) {
+ txmsg.ptrs.ptr0 = 0; /* NULL pointer -> 0 offset */
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr0 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ (stsize = UPWR_PMC_SWT_WORDS * sizeof(struct upwr_switch_board_t)),
+ 0U,
+ swt);
+ }
+
+ ptrval = (unsigned long)os_ptr2phy((void *)mem);
+ if (mem == NULL) {
+ txmsg.ptrs.ptr1 = 0; /* NULL pointer -> 0 offset */
+ } else if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ } else {
+ txmsg.ptrs.ptr1 = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ UPWR_PMC_MEM_WORDS * sizeof(struct upwr_mem_switches_t),
+ stsize,
+ mem);
+ }
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_pmode_config() - Configures a given power mode in a given domain.
+ * @domain: identifier of the domain to which the power mode belongs.
+ * Defined by SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @pmode: SoC-dependent power mode identifier defined by type abs_pwr_mode_t
+ * found in upower_soc_defs.h.
+ * @config: pointer to an SoC-dependent struct defining the power mode
+ * configuration, found in upower_soc_defs.h.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the power mode configuration as
+ * specified above. The request is executed if arguments are within range,
+ * and complies with SoC-dependent restrictions on value combinations.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_pmode_config(soc_domain_t domain,
+ abs_pwr_mode_t pmode,
+ const void *config,
+ upwr_callb callb)
+{
+ upwr_pwm_pmode_cfg_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_CONFIG);
+ txmsg.hdr.domain = (uint32_t)domain;
+ txmsg.hdr.arg = pmode;
+
+ ptrval = (unsigned long)os_ptr2phy(config);
+ if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ }
+
+ /*
+ * upwr_pwm_pmode_config is an exception: use the pointer
+ * (physical addr) as is
+ */
+
+ txmsg.ptr = (uint32_t)ptrval;
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_reg_config() - Configures the uPower internal regulators.
+ * @config: pointer to the struct defining the regulator configuration;
+ * the struct upwr_reg_config_t is defined in the file upower_defs.h.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change/define the configurations of the
+ * internal regulators.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * The service may fail with error UPWR_RESP_RESOURCE if a power mode transition
+ * or the same service (called from another domain) is executing simultaneously.
+ * This error should be interpreted as a "try later" response, as the service
+ * will succeed once those concurrent executions are done, and no other is
+ * started.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+
+int upwr_pwm_reg_config(const struct upwr_reg_config_t *config,
+ upwr_callb callb)
+{
+ upwr_pwm_regcfg_msg txmsg = {0};
+ unsigned long ptrval = 0UL; /* needed for X86, ARM64 */
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_REGCFG);
+
+ ptrval = (unsigned long)os_ptr2phy(config);
+ if (ptrval == 0U) {
+ return -2; /* pointer conversion failed */
+ }
+
+ txmsg.ptr = upwr_ptr2offset(ptrval,
+ UPWR_SG_PWRMGMT,
+ sizeof(struct upwr_reg_config_t),
+ 0U,
+ config);
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_chng_dom_bias() - Changes the domain bias.
+ * @bias: pointer to a domain bias configuration struct (see upower_soc_defs.h).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the domain bias configuration as
+ * specified above. The request is executed if arguments are within range,
+ * with no protections regarding the adequate value combinations and
+ * overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_chng_dom_bias(const struct upwr_dom_bias_cfg_t *bias,
+ upwr_callb callb)
+{
+ upwr_pwm_dom_bias_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_DOM_BIAS);
+
+ /* SoC-dependent argument filling, defined in upower_soc_defs.h */
+ UPWR_FILL_DOMBIAS_ARGS(txmsg.hdr.domain, bias, txmsg.args);
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**
+ * upwr_pwm_chng_mem_bias() - Changes a ROM/RAM power bias.
+ * @domain: identifier of the domain upon which the bias is applied.
+ * Defined by SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @bias: pointer to a memory bias configuration struct (see upower_soc_defs.h).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the memory bias configuration as
+ * specified above. The request is executed if arguments are within range,
+ * with no protections regarding the adequate value combinations and
+ * overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_chng_mem_bias(soc_domain_t domain,
+ const struct upwr_mem_bias_cfg_t *bias,
+ upwr_callb callb)
+{
+ upwr_pwm_mem_bias_msg txmsg = {0};
+
+ if (api_state != UPWR_API_READY) {
+ return -3;
+ }
+
+ if (UPWR_SG_BUSY(UPWR_SG_PWRMGMT)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_PWRMGMT, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_PWRMGMT, UPWR_PWM_MEM_BIAS);
+
+ txmsg.hdr.domain = (uint32_t)domain;
+
+ /* SoC-dependent argument filling, defined in upower_soc_defs.h */
+ UPWR_FILL_MEMBIAS_ARGS(bias, txmsg.args);
+
+ upwr_srv_req(UPWR_SG_PWRMGMT, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
+
+/**---------------------------------------------------------------
+ * DIAGNOSE SERVICE GROUP
+ */
+
+/**
+ * upwr_dgn_mode() - Sets the diagnostic mode.
+ * @mode: diagnostic mode, which can be:
+ * - UPWR_DGN_NONE: no diagnostic recorded
+ * - UPWR_DGN_TRACE: warnings, errors, service, internal activity recorded
+ * - UPWR_DGN_SRVREQ: warnings, errors, service activity recorded
+ * - UPWR_DGN_WARN: warnings and errors recorded
+ * - UPWR_DGN_ALL: trace, service, warnings, errors, task state recorded
+ * - UPWR_DGN_ERROR: only errors recorded
+ * - UPWR_DGN_ALL2ERR: record all until an error occurs,
+ * freeze recording on error
+ * - UPWR_DGN_ALL2HLT: record all until an error occurs,
+ * executes an ebreak on error, which halts the core if enabled through
+ * the debug interface
+ * @callb: pointer to the callback called when mode is changed.
+ * NULL if no callback is required.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_dgn_mode(upwr_dgn_mode_t mode, const upwr_callb callb)
+{
+ upwr_dgn_mode_msg txmsg = {0};
+
+ if (UPWR_SG_BUSY(UPWR_SG_DIAG)) {
+ return -1;
+ }
+
+ UPWR_USR_CALLB(UPWR_SG_DIAG, callb);
+
+ UPWR_MSG_HDR(txmsg.hdr, UPWR_SG_DIAG, UPWR_DGN_MODE);
+
+ txmsg.hdr.arg = mode;
+
+ upwr_srv_req(UPWR_SG_DIAG, (uint32_t *)&txmsg, sizeof(txmsg) / 4U);
+
+ return 0;
+}
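
As an illustrative sketch only (not part of this patch), restricting the diagnostic log to warnings and errors and waiting for the acknowledgement could look like this; the poll budget is arbitrary:

#include "upower_api.h"

/* Record only warnings and errors in the uPower diagnostic log */
static int limit_upower_logging(void)
{
	upwr_resp_t err;

	if (upwr_dgn_mode(UPWR_DGN_WARN, NULL) != 0) {
		return -1;
	}

	if (upwr_poll_req_status(UPWR_SG_DIAG, NULL, &err, NULL,
				 10000U) != UPWR_REQ_OK || err != UPWR_RESP_OK) {
		return -1;
	}

	return 0;
}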
+
+/**---------------------------------------------------------------
+ * AUXILIARY CALLS
+ */
+
+/**
+ * upwr_rom_version() - reports the ROM firmware version.
+ * @vmajor: pointer to the variable to get the firmware major version number.
+ * @vminor: pointer to the variable to get the firmware minor version number.
+ * @vfixes: pointer to the variable to get the firmware fixes number.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: SoC id.
+ */
+uint32_t upwr_rom_version(uint32_t *vmajor, uint32_t *vminor, uint32_t *vfixes)
+{
+ uint32_t soc;
+
+ upwr_lock(1);
+ soc = fw_rom_version.soc_id;
+ *vmajor = fw_rom_version.vmajor;
+ *vminor = fw_rom_version.vminor;
+ *vfixes = fw_rom_version.vfixes;
+ upwr_lock(0);
+ return soc;
+}
+
+/**
+ * upwr_ram_version() - reports the RAM firmware version.
+ * @vminor: pointer to the variable to get the firmware minor version number.
+ * @vfixes: pointer to the variable to get the firmware fixes number.
+ *
+ * The 3 values returned are 0 if no RAM firmware was loaded and initialized.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: firmware major version number.
+ */
+uint32_t upwr_ram_version(uint32_t *vminor, uint32_t *vfixes)
+{
+ uint32_t vmajor;
+
+ upwr_lock(1);
+ vmajor = fw_ram_version.vmajor;
+ *vminor = fw_ram_version.vminor;
+ *vfixes = fw_ram_version.vfixes;
+ upwr_lock(0);
+
+ return vmajor;
+}
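
A short usage sketch, illustrative only and not part of this patch; it assumes TF-A's INFO() macro from common/debug.h is available for logging:

#include <stdint.h>
#include <common/debug.h>	/* INFO() */
#include "upower_api.h"

/* Log both firmware versions; the RAM numbers are all zero if no RAM fw ran */
static void log_upower_versions(void)
{
	uint32_t soc, major, minor, fixes;

	soc = upwr_rom_version(&major, &minor, &fixes);
	INFO("uPower ROM fw %u.%u.%u (SoC id %u)\n", major, minor, fixes, soc);

	major = upwr_ram_version(&minor, &fixes);
	INFO("uPower RAM fw %u.%u.%u\n", major, minor, fixes);
}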
+
+/**
+ * upwr_req_status() - tells the status of the service group request, and
+ * returns a request return value, if any.
+ * @sg: service group of the request
+ * @sgfptr: pointer to the variable that will hold the function id of
+ * the last request completed; can be NULL, in which case it is not used.
+ * @errptr: pointer to the variable that will hold the error code;
+ * can be NULL, in which case it is not used.
+ * @retptr: pointer to the variable that will hold the value returned
+ * by the last request completed (invalid if the last request completed didn't
+ * return any value); can be NULL, in which case it is not used.
+ * Note that a request may return a value even if service error is returned
+ * (*errptr != UPWR_RESP_OK): that is dependent on the specific service.
+ *
+ * This call can be used in a poll loop of a service request completion in case
+ * a callback was not registered.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: service request status: succeeded, failed, or ongoing (busy)
+ */
+upwr_req_status_t upwr_req_status(upwr_sg_t sg,
+ uint32_t *sgfptr,
+ upwr_resp_t *errptr,
+ int *retptr)
+{
+ upwr_req_status_t status;
+
+ upwr_lock(1);
+ if (sgfptr != NULL) {
+ *sgfptr = (uint32_t)sg_rsp_msg[sg].hdr.function;
+ }
+
+ if (errptr != NULL) {
+ *errptr = (upwr_resp_t)sg_rsp_msg[sg].hdr.errcode;
+ }
+
+ if (retptr != NULL) {
+ *retptr = (int)((sg_rsp_siz[sg] == 2U) ?
+ sg_rsp_msg[sg].word2 : sg_rsp_msg[sg].hdr.ret);
+ }
+
+ status = ((sg_busy & (1UL << sg)) == 1U) ? UPWR_REQ_BUSY :
+ (sg_rsp_msg[sg].hdr.errcode == UPWR_RESP_OK) ? UPWR_REQ_OK :
+ UPWR_REQ_ERR;
+ upwr_lock(0);
+ return status;
+}
+
+/**
+ * upwr_poll_req_status() - polls the status of the service group request, and
+ * returns a request return value, if any.
+ * @sg: service group of the request
+ * @sgfptr: pointer to the variable that will hold the function id of
+ * the last request completed; can be NULL, in which case it is not used.
+ * @errptr: pointer to the variable that will hold the error code;
+ * can be NULL, in which case it is not used.
+ * @retptr: pointer to the variable that will hold the value returned
+ * by the last request completed (invalid if the last request completed didn't
+ * return any value); can be NULL, in which case it is not used.
+ * Note that a request may return a value even if service error is returned
+ * (*errptr != UPWR_RESP_OK): that is dependent on the specific service.
+ * @attempts: maximum number of polling attempts; if attempts > 0 and is
+ * reached with no service response received, upwr_poll_req_status returns
+ * UPWR_REQ_BUSY and variables pointed by sgfptr, retptr and errptr are not
+ * updated; if attempts = 0, upwr_poll_req_status waits "forever".
+ *
+ * This call can be used to poll a service request completion in case a
+ * callback was not registered.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: service request status: succeeded, failed, or ongoing (busy)
+ */
+upwr_req_status_t upwr_poll_req_status(upwr_sg_t sg,
+ uint32_t *sgfptr,
+ upwr_resp_t *errptr,
+ int *retptr,
+ uint32_t attempts)
+{
+ uint32_t i;
+ upwr_req_status_t ret;
+
+ if (attempts == 0U) {
+ while ((ret = upwr_req_status(sg, sgfptr, errptr, retptr)) == UPWR_REQ_BUSY) {
+ };
+
+ return ret;
+ }
+
+ for (i = 0U; i < attempts; i++) {
+ ret = upwr_req_status(sg, sgfptr, errptr, retptr);
+ if (ret != UPWR_REQ_BUSY) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * upwr_alarm_code() - returns the alarm code of the last alarm occurrence.
+ *
+ * The value returned is not meaningful if no alarm was issued by uPower.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: alarm code, as defined by the type upwr_alarm_t in upwr_soc_defines.h
+ */
+upwr_alarm_t upwr_alarm_code(void)
+{
+ return (upwr_alarm_t)(3U & (mu->FSR.R >> 1U)); /* FSR[2:1] */
+}
+
+/**---------------------------------------------------------------
+ * TRANSMIT/RECEIVE PRIMITIVES
+ * ---------------------------------------------------------------
+ */
+
+/*
+ * upwr_copy2tr() - copies a message to the MU TR registers;
+ * fill the TR registers before writing TIEN to avoid early interrupts;
+ * also, fill them from the higher index to the lowest, so the receive
+ * interrupt flag RF[0] will be the last to set, regardless of message size;
+ */
+void upwr_copy2tr(struct MU_t *local_mu, const uint32_t *msg, unsigned int size)
+{
+ for (int i = (int)size - 1; i > -1; i--) {
+ local_mu->TR[i].R = msg[i];
+ }
+}
+
+/**
+ * upwr_tx() - queues a message for transmission.
+ * @msg : pointer to the message sent.
+ * @size: message size in 32-bit words
+ * @callback: pointer to a function to be called when transmission done;
+ * can be NULL, in which case no callback is done.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: number of vacant positions left in the transmission queue, or
+ * -1 if the queue was already full when upwr_tx was called, or
+ * -2 if any argument is invalid (like size off-range)
+ */
+int upwr_tx(const uint32_t *msg,
+ unsigned int size,
+ UPWR_TX_CALLB_FUNC_T callback)
+{
+ if (size > UPWR_MU_MSG_SIZE) {
+ return -2;
+ }
+
+ if (size == 0U) {
+ return -2;
+ }
+
+ if (mu->TSR.R != UPWR_MU_TSR_EMPTY) {
+ return -1; /* not all TE bits in 1: some data to send still */
+ }
+
+ mu_tx_callb = callback;
+
+ upwr_copy2tr(mu, msg, size);
+ mu->TCR.R = 1UL << (size - 1UL);
+
+ mu_tx_pend = 1UL;
+
+ return 0;
+}
+
+/**
+ * upwr_rx() - unqueues a received message from the reception queue.
+ * @msg: pointer to the message destination buffer.
+ * @size: pointer to variable to hold message size in 32-bit words.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: number of messages remaining in the reception queue, or
+ * -1 if the queue was already empty when upwr_rx was called, or
+ * -3 if the receive status register holds an unexpected value
+ */
+int upwr_rx(char *msg, unsigned int *size)
+{
+ unsigned int len = mu->RSR.R;
+
+ len = (len == 0x0U) ? 0U :
+ (len == 0x1U) ? 1U :
+ #if UPWR_MU_MSG_SIZE > 1
+ (len == 0x3U) ? 2U :
+ #if UPWR_MU_MSG_SIZE > 2
+ (len == 0x7U) ? 3U :
+ #if UPWR_MU_MSG_SIZE > 3
+ (len == 0xFU) ? 4U :
+ #endif
+ #endif
+ #endif
+ 0xFFFFFFFFU; /* something wrong */
+
+ if (len == 0xFFFFFFFFU) {
+ return -3;
+ }
+
+ if (len == 0U) {
+ return -1;
+ }
+
+ *size = len;
+
+ /*
+ * copy the received message to the rx queue,
+ * so the interrupts are cleared.
+ */
+ msg_copy(msg, (char *)&mu->RR[0], len);
+
+ mu->RCR.R = 1U; /* enable only RR[0] receive interrupt */
+
+ return 0;
+}
+
+/**
+ * upwr_rx_callback() - sets up a callback for a message receiving event.
+ * @callback: pointer to a function to be called when a message arrives;
+ * can be NULL, in which case no callback is done.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: always 0.
+ */
+int upwr_rx_callback(UPWR_RX_CALLB_FUNC_T callback)
+{
+ mu_rx_callb = callback;
+
+ return 0;
+}
+
+/**
+ * msg_copy() - copies a message.
+ * @dest: pointer to the destination message.
+ * @src : pointer to the source message.
+ * @size: message size in words.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void)
+ */
+void msg_copy(char *dest, char *src, unsigned int size)
+{
+ for (uint32_t i = 0U; i < size * sizeof(uint32_t); i++) {
+ dest[i] = src[i];
+ }
+}
diff --git a/plat/imx/imx8ulp/upower/upower_api.h b/plat/imx/imx8ulp/upower/upower_api.h
new file mode 100644
index 0000000000..0069f5f0e7
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upower_api.h
@@ -0,0 +1,1629 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/**
+ * Copyright 2019-2024 NXP
+ *
+ * KEYWORDS: micro-power uPower driver API
+ * -----------------------------------------------------------------------------
+ * PURPOSE: uPower driver API
+ * -----------------------------------------------------------------------------
+ * PARAMETERS:
+ * PARAM NAME RANGE:DESCRIPTION: DEFAULTS: UNITS
+ * -----------------------------------------------------------------------------
+ * REUSE ISSUES: no reuse issues
+ */
+#ifndef UPWR_API_H
+#define UPWR_API_H
+
+#include "upmu.h"
+#include "upower_soc_defs.h"
+/******************************************************************************
+ * uPower API Overview and Concepts
+ *
+ * This API is intended to be used by the OS drivers (Linux, FreeRTOS etc)
+ * as well as bare metal drivers to command and use services from the uPower.
+ * It aims to be OS-independent.
+ *
+ * The API functions fall in 3 categories:
+ * - initialization/start-up
+ * - service requests
+ * - auxiliary
+ *
+ * The communication with the uPower is mostly made through the Message Unit
+ * (MU) IP. uPower provides one MU for each CPU cluster in a different
+ * power domain. An API instance runs on each CPU cluster.
+ *
+ * The API assumes each SoC power domain/CPU cluster receives 2 interrupts
+ * from the uPower MU:
+ * 1. Tx/Rx, which is issued on both transmission and reception
+ * 2. Exception interrupt, to handle critical alarms, catastrophic errors, etc.
+ * This interrupt should have a high priority, preferably an NMI.
+ *
+ * The normal uPower operation is done by service requests. There is an API
+ * function for each service request, and all service requests send back a
+ * response, at least to indicate success/failure.
+ * The service request functions are non-blocking, and their completion can be
+ * tracked in two ways:
+ * 1. by a callback, registered when the service request call is made by
+ * passing the callback function pointer; a NULL pointer may be passed,
+ * in which case no callback is made.
+ * 2. by polling, using the auxiliary functions upwr_req_status or
+ * upwr_poll_req_status;
+ * polling must be used if no callback is registered, but callbacks and
+ * polling are completely independent.
+ *
+ * Note: a service request must not be started from a callback.
+ *
+ * uPower service requests are classified in Service Groups.
+ * Each Service Group has a set of related functions, named upwr_XXX_,
+ * where XXX is a 3-letter service group mnemonic. The service groups are:
+ * - Exception Service Group - upwr_xcp_*
+ * ~ gathers functions that deal with errors and other processes outside
+ * the functional scope.
+ * - Power Management Service Group - upwr_pwm_*
+ * ~ functions to control switches, configure power modes, set internal voltage etc
+ * - Delay Measurement Service Group - upwr_dlm_*
+ * ~ delay measurements function using the process monitor and delay meter
+ * - Voltage Measurement Service Group - upwr_vtm_*
+ * ~ functions for voltage measurements, comparisons, alarms, power meter, set PMIC rail voltage
+ * - Temperature Measurement Service Group - upwr_tpm_*
+ * ~ functions for temperature measurements, comparisons, alarms
+ * - Current Measurement Service Group - upwr_crm_*
+ * ~ functions for current and charge measurement
+ * - Diagnostic Service Group - upwr_dgn_*
+ * ~ functions for log configuration and statistics collecting
+ *
+ * Service requests follow this "golden rule":
+ * *** No two requests run simultaneously for the same service group,
+ * on the same domain ***
+ * They can run simultaneously on different domains (RTD/APD), and can also run
+ * simultaneously if they belong to different service groups (even on the
+ * same domain).
+ * Therefore, requests to the same service group on the same domain must be
+ * serialized. A service request call returns an error if there is another
+ * request on the same service group pending, waiting for a response (on the
+ * same domain).
+ *
+ * A request for continuous service does not block the service group.
+ * For instance, a request to "measure the temperature every 10 milliseconds"
+ * responds quickly, unlocks the service group, and the temperature
+ * continues to be measured as requested, every 10 milliseconds from then on.
+ *
+ * Service Groups have a fixed priority in the API, from higher to lower:
+ * 1. Exception
+ * 2. Power Management
+ * 3. Delay Measurement
+ * 4. Voltage Measurement
+ * 5. Current Measurement
+ * 6. Temperature Measurement
+ * 7. Diagnostics
+ *
+ * The priority above only affects the order in which requests are sent to the
+ * uPower firmware: if a pending MU transmission is blocking transmission, the
+ * request to the higher priority Service Group is sent first, even if its
+ * call was made later. The service priorities in the firmware depend on other
+ * factors.
+ *
+ * Services are requested using API functions. A service function returns with
+ * no error if a request was successfully made, but it doesn't mean the service
+ * was completed. The service is executed asynchronously, and returns a result
+ * (at least success/fail) via a callback or polling for service status.
+ * The possible service response codes are:
+ * - UPWR_RESP_OK = 0, : no error
+ * - UPWR_RESP_SG_BUSY : service group is busy
+ * - UPWR_RESP_SHUTDOWN : services not up or shutting down
+ * - UPWR_RESP_BAD_REQ : invalid request (usually invalid arguments)
+ * - UPWR_RESP_BAD_STATE : system state doesn't allow performing the request
+ * - UPWR_RESP_UNINSTALLD : service or function not installed
+ * - UPWR_RESP_UNINSTALLED : service or function not installed (alias)
+ * - UPWR_RESP_RESOURCE : resource not available
+ * - UPWR_RESP_TIMEOUT : service timeout
+ */
+
+/**
+ * upwr_callb() - generic function pointer for a request return callback.
+ * @sg: request service group
+ * @func: service request function id.
+ * @errcode: error code.
+ * @ret: return value, if any. Note that a request may return a value even if
+ * service error is returned (errcode != UPWR_RESP_OK); that is dependent on
+ * the specific service.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void)
+ */
+typedef void (*upwr_callb)(upwr_sg_t sg, uint32_t func,
+ upwr_resp_t errcode, ...);
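+
+/*
+ * Illustrative sketch (not part of the original API documentation): a minimal
+ * callback matching the upwr_callb signature above. The pwm_done flag and the
+ * callback name are hypothetical; UPWR_SG_PWRMGMT and UPWR_RESP_OK are the
+ * symbols referred to elsewhere in this header.
+ *
+ *   static volatile int pwm_done;
+ *
+ *   static void pwm_callback(upwr_sg_t sg, uint32_t func,
+ *                            upwr_resp_t errcode, ...)
+ *   {
+ *           if ((sg == UPWR_SG_PWRMGMT) && (errcode == UPWR_RESP_OK)) {
+ *                   pwm_done = 1;
+ *           }
+ *   }
+ *
+ * Per the note above, a new service request must not be started from inside
+ * the callback itself.
+ */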
+
+/**---------------------------------------------------------------
+ * INITIALIZATION, CONFIGURATION
+ *
+ * A reference uPower initialization sequence goes as follows:
+ *
+ * 1. host CPU calls upwr_init.
+ * 2. (optional) host checks the ROM version and SoC code by calling upwr_vers(...)
+ * and optionally performs any configuration or workaround accordingly.
+ * 3. host CPU calls upwr_start to start the uPower services, passing a
+ * service option number.
+ * If no RAM code is loaded or it has no service options, the launch option
+ * number passed must be 0, which will start the services available in ROM.
+ * upwr_start also receives a pointer to a callback called by the API
+ * when the firmware is ready to receive service requests.
+ * The callback may be replaced by polling, calling upwr_req_status in a loop
+ * or upwr_poll_req_status; in this case the callback pointer may be NULL.
+ * A host may call upwr_start even if the services were already started by
+ * any host: if the launch option is the same, the response will be ok,
+ * but will indicate error if the services were already started with a
+ * different launch option.
+ * 4. host waits for the callback to be called, or for the polling to finish;
+ * if no error is returned, it can start making service calls using the API.
+ *
+ * Variations on that reference sequence are possible:
+ * - the uPower services can be started using the ROM code only, which includes
+ * the basic Power Management services, among others, with launch option
+ * number = 0.
+ * The code RAM can be loaded while these services are running and,
+ * when the loading is done, the services can be re-started with these 2
+ * requests executed in order: upwr_xcp_shutdown and upwr_start,
+ * using the newly loaded RAM code (launch option > 0).
+ *
+ * NOTE: the initialization call upwr_init is not effective and
+ * returns error when called after the uPower services are started.
+ */
+
+/**
+ * upwr_init() - API initialization; must be the first API call after reset.
+ * @domain: SoC-dependent CPU domain id; identifier used by the firmware in
+ * many services. Defined by SoC-dependent type soc_domain_t found in
+ * upower_soc_defs.h.
+ * @muptr: pointer to the MU instance.
+ * @mallocptr: pointer to the memory allocation function
+ * @physaddrptr: pointer to the function to convert pointers to
+ * physical addresses. If NULL, no conversion is made (pointer=physical address)
+ * @isrinstptr: pointer to the function to install the uPower ISR callbacks;
+ * the function receives the pointers to the MU tx/rx and Exception ISRs
+ * callbacks, which must be called from the actual system ISRs.
+ * The function pointed by isrinstptr must also enable the interrupt at the
+ * core/interrupt controller, but must not enable the interrupt at the MU IP.
+ * The system ISRs are responsible for dealing with the interrupt controller,
+ * performing any other context save/restore, and any other housekeeping.
+ * @lockptr: pointer to a function that prevents MU interrupts (if argument=1)
+ * or allows it (if argument=0). The API calls this function to make small
+ * specific code portions thread safe. Only MU interrupts must be avoided,
+ * the code may be suspended for other reasons.
+ * If no MU interrupts can happen during the execution of an API call or
+ * callback, even if enabled, for some other reason (e.g. interrupt priority),
+ * then this argument may be NULL.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if failed to allocate memory, or use some other resource.
+ * -2 if any argument is invalid.
+ * -3 if failed to send the ping message.
+ * -4 if failed to receive the initialization message, or was invalid
+ */
+
+/* malloc function ptr */
+typedef void* (*upwr_malloc_ptr_t)(unsigned int size);
+
+/* pointer->physical address conversion function ptr */
+typedef void* (*upwr_phyadr_ptr_t)(const void *addr);
+
+typedef uint32_t upwr_api_state_t;
+
+extern volatile upwr_api_state_t api_state;
+
+/*
+ * upwr_lock_ptr_t: pointer to a function that prevents MU interrupts
+ * (if argument lock=1) or allows it (if argument lock=0).
+ * The API calls this function to make small specific code portions thread safe.
+ * Only MU interrupts must be avoided, the code may be suspended for other
+ * reasons.
+ */
+typedef void (*upwr_lock_ptr_t)(int lock);
+
+typedef void (*upwr_isr_callb)(void);
+
+typedef void (*upwr_inst_isr_ptr_t)(upwr_isr_callb txrx_isr,
+ upwr_isr_callb excp_isr);
+void upwr_start_callb(void);
+
+int upwr_init(soc_domain_t domain, struct MU_t *muptr,
+ const upwr_malloc_ptr_t mallocptr,
+ const upwr_phyadr_ptr_t phyadrptr,
+ const upwr_inst_isr_ptr_t isrinstptr,
+ const upwr_lock_ptr_t lockptr);
+
+/**
+ * upwr_start() - Starts the uPower services.
+ * @launchopt: a number to select between multiple launch options,
+ * that may define, among other things, which services will be started,
+ * or which services implementations, features etc.
+ * launchopt = 0 selects a subset of services implemented in ROM;
+ * any other number selects service sets implemented in RAM, launched
+ * by the firmware function ram_launch; if an invalid launchopt value is passed,
+ * no services are started, and the callback returns error (see below).
+ * @rdycallb: pointer to the callback to be called when the uPower is ready
+ * to receive service requests. NULL if no callback needed.
+ * The callback receives as arguments the RAM firmware version numbers.
+ * If all 3 numbers (vmajor, vminor, vfixes) are 0, that means the
+ * service launching failed.
+ * Firmware version numbers will be the same as ROM if launchopt = 0,
+ * selecting the ROM services.
+ *
+ * upwr_start can be called by any domain even if the services are already
+ * started: it has no effect, returning success, if the launch option is the
+ * same as the one that actually started the service, and returns error if
+ * called with a different option.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if a resource failed,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+
+extern void upwr_txrx_isr(void);
+
+typedef void (*upwr_rdy_callb)(uint32_t vmajor, uint32_t vminor, uint32_t vfixes);
+
+int upwr_start(uint32_t launchopt, const upwr_rdy_callb rdycallb);
+
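+/*
+ * Illustrative sketch of the reference start-up sequence described above (not
+ * part of the original API documentation). The MU pointer mu_base, the
+ * plat_malloc/plat_install_isr hooks and the services_up flag are
+ * hypothetical, SoC/board-specific placeholders; APD_DOMAIN is the domain id
+ * example used elsewhere in this header. The NULL arguments rely on the
+ * optional phyadrptr/lockptr behaviour documented for upwr_init.
+ *
+ *   static void rdy_callb(uint32_t vmajor, uint32_t vminor, uint32_t vfixes)
+ *   {
+ *           services_up = ((vmajor | vminor | vfixes) != 0U);
+ *   }
+ *
+ *   if (upwr_init(APD_DOMAIN, mu_base, plat_malloc, NULL,
+ *                 plat_install_isr, NULL) == 0) {
+ *           (void)upwr_start(0U, rdy_callb);
+ *   }
+ *
+ * launchopt = 0 starts the services implemented in ROM, as described above.
+ */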
+
+/**---------------------------------------------------------------
+ * EXCEPTION SERVICE GROUP
+ */
+
+/**
+ * upwr_xcp_config() - Applies general uPower configurations.
+ * @config: pointer to the uPower SoC-dependent configuration struct
+ * upwr_xcp_config_t defined in upower_soc_defs.h. NULL may be passed, meaning
+ * a request to read the configuration, in which case it appears in the callback
+ * argument ret, or can be pointed to by argument retptr in the upwr_req_status
+ * and upwr_poll_req_status calls, cast to upwr_xcp_config_t.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the configuration, or NULL if no callback needed (polling used instead).
+ *
+ * Some configurations are targeted for a specific domain (see the struct
+ * upwr_xcp_config_t definition in upower_soc_defs.h); this call has implicit
+ * domain target (the same domain from which it is called).
+ *
+ * The return value is always the current configuration value, either in a
+ * read-only request (config = NULL) or after setting a new configuration
+ * (non-NULL config).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_config(const upwr_xcp_config_t *config, const upwr_callb callb);
+
+/**
+ * upwr_xcp_sw_alarm() - Makes uPower issue an alarm interrupt to given domain.
+ * @domain: identifier of the domain to alarm. Defined by SoC-dependent type
+ * soc_domain_t found in upower_soc_defs.h.
+ * @code: alarm code. Defined by SoC-dependent type upwr_alarm_t found in
+ * upower_soc_defs.h.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the alarm, or NULL if no callback needed (polling used instead).
+ *
+ * The function requests the uPower to issue an alarm of the given code as if
+ * it had originated internally. This service is useful mainly to test the
+ * system response to such alarms, or to make the system handle a similar alarm
+ * situation detected externally to uPower.
+ *
+ * The system ISR/code handling the alarm may retrieve the alarm code by calling
+ * the auxiliary function upwr_alarm_code.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_sw_alarm(soc_domain_t domain, upwr_alarm_t code,
+ const upwr_callb callb);
+
+/**
+ * upwr_xcp_set_ddr_retention() - M33/A35 can use this API to set/clear ddr retention
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets DDR retention, false clears DDR retention.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_set_ddr_retention(soc_domain_t domain, uint32_t enable,
+ const upwr_callb callb);
+
+/**
+ * upwr_xcp_set_mipi_dsi_ena() - M33/A35 can use this API to set/clear mipi dsi ena
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets the MIPI DSI enable flag, false clears it.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_set_mipi_dsi_ena(soc_domain_t domain, uint32_t enable,
+ const upwr_callb callb);
+
+/**
+ * upwr_xcp_get_mipi_dsi_ena() - M33/A35 can use this API to get mipi dsi ena status
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+
+int upwr_xcp_get_mipi_dsi_ena(soc_domain_t domain, const upwr_callb callb);
+
+/**
+ * upwr_xcp_set_osc_mode() - M33/A35 can use this API to set uPower OSC mode
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @osc_mode: 0 means low frequency, non-zero means high frequency.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_osc_mode(soc_domain_t domain, uint32_t osc_mode,
+ const upwr_callb callb);
+
+/**
+ * upwr_xcp_set_rtd_use_ddr() - M33 calls this API to inform uPower that M33 is using DDR
+ * @domain: identifier of the caller domain.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @is_use_ddr: non-zero (true) means that RTD is using DDR; 0 (false) means
+ * that RTD is not using DDR.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_rtd_use_ddr(soc_domain_t domain, uint32_t is_use_ddr,
+ const upwr_callb callb);
+
+/**
+ * upwr_xcp_set_rtd_apd_llwu() - M33/A35 can use this API to set/clear rtd_llwu apd_llwu
+ * @domain: selects which domain's LLWU (RTD_DOMAIN, APD_DOMAIN) is affected.
+ * soc_domain_t found in upower_soc_defs.h.
+ * @enable: true sets rtd_llwu or apd_llwu, false clears it.
+ * @callb: NULL
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_set_rtd_apd_llwu(soc_domain_t domain, uint32_t enable,
+ const upwr_callb callb);
+/**
+ * upwr_xcp_shutdown() - Shuts down all uPower services and power mode tasks.
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the shutdown, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * At the callback the uPower/API is back to initialization/start-up phase,
+ * so service request calls return error.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_shutdown(const upwr_callb callb);
+
+/**
+ * upwr_xcp_i2c_access() - Performs an access through the uPower I2C interface.
+ * @addr: I2C slave address, up to 10 bits.
+ * @data_size: determines the access direction and data size in bytes, up to 4;
+ * negative data_size determines a read access with size -data_size;
+ * positive data_size determines a write access with size data_size;
+ * data_size=0 is invalid, making the service return error UPWR_RESP_BAD_REQ.
+ * @subaddr_size: size of the sub-address in bytes, up to 4; if subaddr_size=0,
+ * no subaddress is used.
+ * @subaddr: sub-address, only used if subaddr_size > 0.
+ * @wdata: write data, up to 4 bytes; ignored if data_size < 0 (read)
+ * @callb: pointer to the callback to be called when the uPower has finished
+ * the access, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_EXCEPT as the service group argument.
+ *
+ * The service performs a read (data_size < 0) or a write (data_size > 0) of
+ * up to 4 bytes on the uPower I2C interface. The data read from I2C comes via
+ * the callback argument ret, or is written to the variable pointed to by
+ * retptr, if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * Sub-addressing is supported, with sub-address size determined by the argument
+ * subaddr_size, up to 4 bytes. Sub-addressing is not used if subaddr_size=0.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_xcp_i2c_access(uint16_t addr, int8_t data_size, uint8_t subaddr_size,
+ uint32_t subaddr, uint32_t wdata,
+ const upwr_callb callb);
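+
+/*
+ * Illustrative sketch (not part of the original API documentation): a one-byte
+ * write followed by a one-byte read of the same PMIC register, polled with a
+ * NULL callback. The slave address 0x32 and sub-address 0x20 are hypothetical
+ * example values; the response is checked with upwr_req_status or
+ * upwr_poll_req_status on UPWR_SG_EXCEPT, and the byte read back is delivered
+ * in ret (or *retptr).
+ *
+ *   rc = upwr_xcp_i2c_access(0x32U, 1, 1U, 0x20U, 0x5aU, NULL);
+ *   ... wait for the response ...
+ *   rc = upwr_xcp_i2c_access(0x32U, -1, 1U, 0x20U, 0U, NULL);
+ */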
+
+
+/**---------------------------------------------------------------
+ * POWER MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_pwm_dom_power_on() - Commands uPower to power on the platform of another
+ * domain (not necessarily its core(s)); does not release the core reset.
+ * @domain: identifier of the domain to power on. Defined by SoC-dependent type
+ * soc_domain_t found in upower_soc_defs.h.
+ * @boot_start: must be 1 to start the domain core(s) boot(s), releasing
+ * its (their) resets, or 0 otherwise.
+ * @pwroncallb: pointer to the callback to be called when the uPower has
+ * finished the power on procedure, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_dom_power_on(soc_domain_t domain, int boot_start,
+ const upwr_callb pwroncallb);
+
+/**
+ * upwr_pwm_boot_start() - Commands uPower to release the reset of other CPU(s),
+ * starting their boots.
+ * @domain: identifier of the domain to release the reset. Defined by
+ * SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @bootcallb: pointer to the callback to be called when the uPower has finished
+ * the boot start procedure, or NULL if no callback needed
+ * (polling used instead).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * The callback being called doesn't mean the CPU boots have finished:
+ * it only indicates that uPower released the CPU resets, and can receive
+ * other power management service group requests.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -2 if the domain passed is the same as the caller,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_boot_start(soc_domain_t domain, const upwr_callb bootcallb);
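+
+/*
+ * Illustrative sketch (not part of the original API documentation): powering
+ * on another domain's platform without releasing its core resets, then
+ * releasing them with a second request once the first one has completed (both
+ * calls belong to the same service group, so they must be serialized).
+ * RTD_DOMAIN is the example domain id used elsewhere in this header and
+ * pwm_callback stands for a caller-provided callback (or NULL to poll).
+ *
+ *   rc = upwr_pwm_dom_power_on(RTD_DOMAIN, 0, pwm_callback);
+ *   ... wait for the power-on response ...
+ *   rc = upwr_pwm_boot_start(RTD_DOMAIN, pwm_callback);
+ */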
+
+/**
+ * upwr_pwm_param() - Changes Power Management parameters.
+ * @param: pointer to a parameter structure upwr_pwm_param_t, SoC-dependent,
+ * defined in upwr_soc_defines.h. NULL may be passed, meaning
+ * a request to read the parameter set, in which case it appears in the callback
+ * argument ret, or can be pointed to by argument retptr in the upwr_req_status
+ * and upwr_poll_req_status calls, cast to upwr_pwm_param_t.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The return value is always the current parameter set value, either in a
+ * read-only request (param = NULL) or after setting a new parameter
+ * (non-NULL param).
+ *
+ * Some parameters may be targeted for a specific domain (see the struct
+ * upwr_pwm_param_t definition in upower_soc_defs.h); this call has implicit
+ * domain target (the same domain from which it is called).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded or
+ * not.
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_pwm_param(upwr_pwm_param_t *param, const upwr_callb callb);
+
+/**
+ * upwr_pwm_chng_reg_voltage() - Changes the voltage at a given regulator.
+ * @reg: regulator id.
+ * @volt: voltage value; value unit is SoC-dependent, converted from mV by the
+ * macro UPWR_VOLT_MILIV, or from micro-Volts by the macro UPWR_VOLT_MICROV,
+ * both macros in upower_soc_defs.h
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to change the voltage of the given regulator.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_chng_reg_voltage(uint32_t reg, uint32_t volt, upwr_callb callb);
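+
+/*
+ * Illustrative sketch (not part of the original API documentation): requesting
+ * 0.9 V on an internal regulator, assuming UPWR_VOLT_MILIV is the function-like
+ * conversion macro from millivolts mentioned above. The regulator id 0 is a
+ * hypothetical example value.
+ *
+ *   rc = upwr_pwm_chng_reg_voltage(0U, UPWR_VOLT_MILIV(900U), NULL);
+ */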
+
+/**
+ * upwr_pwm_freq_setup() - Determines the next frequency target for a given
+ * domain and current frequency.
+ * @domain: identifier of the domain to change frequency. Defined by
+ * SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @rail: the pmic regulator number for the target domain.
+ * @stage: DVA adjust stage;
+ * refer to the "DVA adjust stage" definitions in upower_defs.h
+ * @target_freq: the target frequency to adjust to, accurate to MHz
+ *
+ * refer to upower_defs.h structure definition upwr_pwm_freq_msg
+ *
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The DVA algorithm is broken down into two phases.
+ * The first phase uses a look-up table to get a safe operating voltage
+ * for the requested frequency.
+ * This voltage is guaranteed to work over process and temperature.
+ *
+ * The second step of the second phase is to measure the temperature
+ * using the uPower Temperature Sensor module.
+ * This is accomplished by doing a binary search of the TSEL bit field
+ * in the Temperature Measurement Register (TMR).
+ * The search is repeated until the THIGH bit fields in the same register change value.
+ * There are 3 temperature sensors in 8ULP (APD, AVD, and RTD).
+ *
+ *
+ * The second phase is the fine adjust of the voltage.
+ * This stage is entered only when the new frequency requested by the
+ * application has already been set, as well as the voltage for that frequency.
+ * The first step of the fine adjust is to find the current margins of the
+ * monitored critical paths, or, in other words,
+ * how many delay cells are necessary to generate a setup-timing violation.
+ * The function informs uPower that the given domain frequency has changed or
+ * will change to the given value. uPower firmware will then adjust voltage and
+ * bias to cope with the new frequency (if decreasing) or prepare for it
+ * (if increasing). The function must be called after decreasing the frequency,
+ * and before increasing it. The actual increase in frequency must not occur
+ * before the service returns its response.
+ *
+ * So, when increasing the clock frequency, the user needs to call this API
+ * twice: first for the gross adjust stage, then for the fine adjust stage.
+ *
+ * When reducing the clock frequency, the user only needs to call this API
+ * once, with the full stage (gross and fine adjust combined).
+ *
+ * The request is executed if arguments are within range.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_freq_setup(soc_domain_t domain, uint32_t rail, uint32_t stage,
+ uint32_t target_freq, upwr_callb callb);
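+
+/*
+ * Illustrative sketch (not part of the original API documentation) of the two
+ * usage patterns described above. DVA_GROSS_STAGE/DVA_FINE_STAGE/DVA_FULL_STAGE
+ * stand in for the "DVA adjust stage" values defined in upower_defs.h, cb is a
+ * caller-provided callback (or NULL to poll), and the rail and frequency
+ * values are examples only.
+ *
+ *   Increasing the frequency (before actually raising the clock):
+ *     rc = upwr_pwm_freq_setup(APD_DOMAIN, rail, DVA_GROSS_STAGE, 900U, cb);
+ *     ... wait for the response ...
+ *     rc = upwr_pwm_freq_setup(APD_DOMAIN, rail, DVA_FINE_STAGE, 900U, cb);
+ *
+ *   Reducing the frequency (after lowering the clock):
+ *     rc = upwr_pwm_freq_setup(APD_DOMAIN, rail, DVA_FULL_STAGE, 300U, cb);
+ */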
+
+/**
+ * upwr_pwm_power_on() - Powers on (not off) one or more switches and ROM/RAMs.
+ * @swton: pointer to an array of words that tells which power switches to
+ * turn on. Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective switch must be turned on,
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no switch will be changed,
+ * unless a memory that it feeds must be turned on.
+ * WARNING: swton must not point to the first shared memory address.
+ * @memon: pointer to an array of words that tells which memories to turn on.
+ * Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective memory must be turned on, both array and
+ * periphery logic;
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no memory will be changed.
+ * WARNING: memon must not point to the first shared memory address.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn on the PMC and memory array/peripheral
+ * switches that control their power, as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate memory power state related to overall system state.
+ *
+ * If a memory is requested to turn on, but the power switch that feeds that
+ * memory is not, the power switch will be turned on anyway, if the swton
+ * array is not provided (that is, if swton is NULL).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_power_on(const uint32_t swton[], const uint32_t memon[],
+ upwr_callb callb);
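+
+/*
+ * Illustrative sketch (not part of the original API documentation): turning on
+ * one power switch and one memory by setting the corresponding bits of the
+ * per-bit word arrays. UPWR_PMC_SWT_WORDS and UPWR_MEM_SWT_WORDS are assumed
+ * to be the SoC-defined array sizes, the bit positions (4 and 7) are examples
+ * only, and pwm_callback stands for a caller-provided callback (or NULL).
+ * The arrays live on the caller's stack, so the shared-memory warning above
+ * does not apply.
+ *
+ *   uint32_t swton[UPWR_PMC_SWT_WORDS] = {0U};
+ *   uint32_t memon[UPWR_MEM_SWT_WORDS] = {0U};
+ *
+ *   swton[0] |= (1U << 4);
+ *   memon[0] |= (1U << 7);
+ *   rc = upwr_pwm_power_on(swton, memon, pwm_callback);
+ */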
+
+/**
+ * upwr_pwm_power_off() - Powers off (not on) one or more switches and ROM/RAMs.
+ * @swtoff: pointer to an array of words that tells which power switches to
+ * turn off. Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective switch must be turned off,
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no switch will be changed.
+ * WARNING: swtoff must not point to the first shared memory address.
+ * @memoff: pointer to an array of words that tells which memories to turn off.
+ * Each word in the array has 1 bit for each switch.
+ * A bit=1 means the respective memory must be turned off, both array and
+ * periphery logic;
+ * bit = 0 means it will stay unchanged (on or off).
+ * The pointer may be set to NULL, in which case no memory will be changed,
+ * but notice it may be turned off if the switch that feeds it is powered off.
+ * WARNING: memoff must not point to the first shared memory address.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn off the PMC and memory array/peripheral
+ * switches that control their power, as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate memory power state related to overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_power_off(const uint32_t swtoff[], const uint32_t memoff[],
+ upwr_callb callb);
+
+/**
+ * upwr_pwm_mem_retain() - Configures one or more memory power switches to
+ * retain their contents, keeping the power array on while the peripheral
+ * logic is turned off.
+ * @mem: pointer to an array of words that tells which memories to put in a
+ * retention state. Each word in the array has 1 bit for each memory.
+ * A bit=1 means the respective memory must be put in retention state,
+ * bit = 0 means it will stay unchanged (retention, fully on or off).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to turn off the memory peripheral and leave
+ * its array on, as specified above.
+ * The request is executed if arguments are within range.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_mem_retain(const uint32_t mem[], upwr_callb callb);
+
+/**
+ * upwr_pwm_chng_switch_mem() - Turns on/off power on one or more PMC switches
+ * and memories, including their array and peripheral logic.
+ * @swt: pointer to a list of PMC switches to be opened/closed.
+ * The list is structured as an array of struct upwr_switch_board_t
+ * (see upower_defs.h), each one containing a word for up to 32 switches,
+ * one per bit. A bit = 1 means switch closed, bit = 0 means switch open.
+ * struct upwr_switch_board_t also specifies a mask with 1 bit for each
+ * respective switch: mask bit = 1 means the open/close action is applied,
+ * mask bit = 0 means the switch stays unchanged.
+ * The pointer may be set to NULL, in which case no switch will be changed,
+ * unless a memory that it feeds must be turned on.
+ * WARNING: swt must not point to the first shared memory address.
+ * @mem: pointer to a list of switches to be turned on/off.
+ * The list is structured as an array of struct upwr_mem_switches_t
+ * (see upower_defs.h), each one containing 2 words for up to 32 switches,
+ * one per bit, one word for the RAM array power switch, other for the
+ * RAM peripheral logic power switch. A bit = 1 means switch closed,
+ * bit = 0 means switch open.
+ * struct upwr_mem_switches_t also specifies a mask with 1 bit for each
+ * respective switch: mask bit = 1 means the open/close action is applied,
+ * mask bit = 0 means the switch stays unchanged.
+ * The pointer may be set to NULL, in which case no memory switch will be
+ * changed, but notice it may be turned off if the switch that feeds it is
+ * powered off.
+ * WARNING: mem must not point to the first shared memory address.
+ * @callb: pointer to the callback called when the configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the PMC switches and/or memory power
+ * as specified above.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate switch combinations and overall system state.
+ *
+ * If a memory is requested to turn on, but the power switch that feeds that
+ * memory is not, the power switch will be turned on anyway, if the swt
+ * array is not provided (that is, if swt is NULL).
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Callback or polling may return error if the service contends for a resource
+ * already being used by a power mode transition or an ongoing service in
+ * another domain.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy.
+ * -2 if a pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_chng_switch_mem(const struct upwr_switch_board_t swt[],
+ const struct upwr_mem_switches_t mem[],
+ upwr_callb callb);
+
+/**
+ * upwr_pwm_pmode_config() - Configures a given power mode in a given domain.
+ * @domain: identifier of the domain to which the power mode belongs.
+ * Defined by SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @pmode: SoC-dependent power mode identifier defined by type abs_pwr_mode_t
+ * found in upower_soc_defs.h.
+ * @config: pointer to an SoC-dependent struct defining the power mode
+ * configuration, found in upower_soc_defs.h.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the power mode configuration as
+ * specified above. The request is executed if arguments are within range,
+ * and complies with SoC-dependent restrictions on value combinations.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_pmode_config(soc_domain_t domain, abs_pwr_mode_t pmode,
+ const void *config, upwr_callb callb);
+
+
+
+/**
+ * upwr_pwm_reg_config() - Configures the uPower internal regulators.
+ * @config: pointer to the struct defining the regulator configuration;
+ * the struct upwr_reg_config_t is defined in the file upower_defs.h.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change/define the configurations of the
+ * internal regulators.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * The service may fail with error UPWR_RESP_RESOURCE if a power mode transition
+ * or the same service (called from another domain) is executing simultaneously.
+ * This error should be interpreted as a "try later" response, as the service
+ * will succeed once those concurrent executions are done, and no other is
+ * started.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_pwm_reg_config(const struct upwr_reg_config_t *config,
+ upwr_callb callb);
+
+/**
+ * upwr_pwm_chng_dom_bias() - Changes the domain bias.
+ * @bias: pointer to a domain bias configuration struct (see upower_soc_defs.h).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the domain bias configuration as
+ * specified above. The request is executed if arguments are within range,
+ * with no protections regarding the adequate value combinations and
+ * overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+
+int upwr_pwm_chng_dom_bias(const struct upwr_dom_bias_cfg_t *bias,
+ upwr_callb callb);
+
+/**
+ * upwr_pwm_chng_mem_bias() - Changes a ROM/RAM power bias.
+ * @domain: identifier of the domain upon which the bias is applied.
+ * Defined by SoC-dependent type soc_domain_t found in upower_soc_defs.h.
+ * @bias: pointer to a memory bias configuration struct (see upower_soc_defs.h).
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change the memory bias configuration as
+ * specified above. The request is executed if arguments are within range,
+ * with no protections regarding the adequate value combinations and
+ * overall system state.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+
+int upwr_pwm_chng_mem_bias(soc_domain_t domain,
+ const struct upwr_mem_bias_cfg_t *bias,
+ upwr_callb callb);
+
+/**---------------------------------------------------------------
+ * VOLTAGE MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_vtm_pmic_cold_reset() - requests a cold reset of the PMIC.
+ * The PMIC will power cycle all the regulators.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to cold reset the pmic.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_pmic_cold_reset(upwr_callb callb);
+
+/**
+ * upwr_vtm_set_pmic_mode() - requests uPower to set the PMIC mode.
+ * @pmic_mode: the target mode to be set
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to set the PMIC mode.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_set_pmic_mode(uint32_t pmic_mode, upwr_callb callb);
+
+/**
+ * upwr_vtm_chng_pmic_voltage() - Changes the voltage of a given rail.
+ * @rail: pmic rail id.
+ * @volt: the target voltage of the given rail, accurate to uV.
+ * Passing volt = 0 means that this rail is powered off.
+ * @callb: response callback pointer; NULL if no callback needed.
+ *
+ * The function requests uPower to change the voltage of the given rail.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_chng_pmic_voltage(uint32_t rail, uint32_t volt, upwr_callb callb);
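+
+/*
+ * Illustrative sketch (not part of the original API documentation): setting
+ * one PMIC rail to 1.0 V (volt is given in uV) and powering another rail off
+ * by passing 0. The rail ids 2 and 3 are hypothetical example values.
+ *
+ *   rc = upwr_vtm_chng_pmic_voltage(2U, 1000000U, NULL);
+ *   ... wait for the response ...
+ *   rc = upwr_vtm_chng_pmic_voltage(3U, 0U, NULL);
+ */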
+
+/**
+ * upwr_vtm_get_pmic_voltage() - Gets the voltage of a given rail.
+ * @rail: pmic rail id.
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to get the voltage of the given rail.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The voltage data is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_get_pmic_voltage(uint32_t rail, upwr_callb callb);
+
+
+/**
+ * upwr_vtm_power_measure() - requests uPower to measure power consumption
+ * @ssel: This field determines which power switches will have their currents
+ * sampled and accounted for in a
+ * current/power measurement. Supported values: 0~7
+ *
+ * SSEL bit # Power Switch
+ * 0 M33 core complex/platform/peripherals
+ * 1 Fusion Core and Peripherals
+ * 2 A35[0] core complex
+ * 3 A35[1] core complex
+ * 4 3DGPU
+ * 5 HiFi4
+ * 6 DDR Controller (PHY and PLL NOT included)
+ * 7 PXP, EPDC
+ *
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to measure power consumption
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The power consumption data is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * The measurement is accurate to uA.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_power_measure(uint32_t ssel, upwr_callb callb);
+
+/**
+ * upwr_vtm_vmeter_measure() - requests uPower to measure a voltage
+ * @vdetsel: Voltage Detector Selector, supported values 0~3
+ * 00b - RTD sense point
+ * 01b - LDO output
+ * 10b - APD domain sense point
+ * 11b - AVD domain sense point
+ * Refer to upower_defs.h
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to use vmeter to measure voltage
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_VOLTM as the service group argument.
+ *
+ * The voltage data is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Refer to RM COREREGVL (Core Regulator Voltage Level)
+ * uPower returns VDETLVL to the user; the user can calculate the real voltage:
+ *
+ * 0b000000(0x00) - 0.595833V
+ * 0b100110(0x26) - 1.007498V
+ * <value> - 0.595833V + <value>x10.8333mV
+ * 0b110010(0x32) - 1.138V
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_vmeter_measure(uint32_t vdetsel, upwr_callb callb);
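+
+/*
+ * Illustrative sketch (not part of the original API documentation): converting
+ * the VDETLVL value returned by the vmeter service to microvolts, following
+ * the formula quoted above (0.595833 V + VDETLVL x 10.8333 mV). The helper
+ * name is hypothetical and the integer arithmetic truncates the 0.3 uV
+ * fraction per step.
+ *
+ *   static uint32_t vdetlvl_to_uv(uint32_t vdetlvl)
+ *   {
+ *           return 595833U + (vdetlvl * 10833U);
+ *   }
+ */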
+
+/**
+ * upwr_vtm_pmic_config() - Configures the SoC PMIC (Power Management IC).
+ * @config: pointer to a PMIC-dependent struct defining the PMIC configuration.
+ * @size: size of the struct pointed by config, in bytes.
+ * @callb: pointer to the callback called when configurations are applied.
+ * NULL if no callback is required.
+ *
+ * The function requests uPower to change/define the PMIC configuration.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_PWRMGMT as the service group argument.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok, -1 if service group is busy,
+ * -2 if the pointer conversion to physical address failed,
+ * -3 if called in an invalid API state.
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_vtm_pmic_config(const void *config, uint32_t size, upwr_callb callb);
+
+/**---------------------------------------------------------------
+ * TEMPERATURE MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_tpm_get_temperature() - request uPower to get temperature of one temperature sensor
+ * @sensor_id: temperature sensor ID, supports 0~2
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to measure temperature
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_TEMPM as the service group argument.
+ *
+ * The temperature data is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls to upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ *
+ * uPower returns TSEL to the caller (M33 or A35); the caller calculates the
+ * real temperature as:
+ * Tsh = 0.000002673049*TSEL[7:0]^3 + 0.0003734262*TSEL[7:0]^2 +
+ *       0.4487042*TSEL[7:0] - 46.98694
+ *
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_tpm_get_temperature(uint32_t sensor_id, upwr_callb callb);
+
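+/*
+ * Worked example (illustrative, not part of the API): for TSEL = 128,
+ * Tsh = 0.000002673049*128^3 + 0.0003734262*128^2 + 0.4487042*128 - 46.98694
+ *     ~= 5.61 + 6.12 + 57.43 - 46.99 ~= 22 Celsius.
+ * upower_read_temperature() in upower_hal.c implements the same polynomial
+ * using integer-only arithmetic.
+ */
+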
+/**---------------------------------------------------------------
+ * DELAY MANAGEMENT SERVICE GROUP
+ */
+
+/**
+ * upwr_dlm_get_delay_margin() - request uPower to get delay margin
+ * @path: The critical path
+ * @index: which delay meter to use
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to get delay margin
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The delay margin data is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls to upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_get_delay_margin(uint32_t path, uint32_t index, upwr_callb callb);
+
+/**
+ * upwr_dlm_set_delay_margin() - request uPower to set delay margin
+ * @path: The critical path
+ * @index: which delay meter to use
+ * @delay_margin: the value of delay margin
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to set delay margin
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The pass/fail result for the corresponding critical path is read from uPower
+ * via the callback argument ret, or written to the variable pointed to by
+ * retptr, if polling is used (calls to upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_set_delay_margin(uint32_t path, uint32_t index, uint32_t delay_margin, upwr_callb callb);
+
+/**
+ * upwr_dlm_process_monitor() - request uPower to run the process monitor
+ * @chain_sel: Chain Cell Type Selection
+ * Select the chain to be used for the clock signal generation.
+ * Two types of chain cells are supported (0~1):
+ * 0b - P4 type delay cells selected
+ * 1b - P16 type delay cells selected
+ * @callb: response callback pointer; NULL if no callback needed.
+ * (polling used instead)
+ *
+ * The function requests uPower to run the process monitor.
+ * The request is executed if arguments are within range, with no protections
+ * regarding the adequate voltage value for the given domain process,
+ * temperature and frequency.
+ *
+ * A callback can be optionally registered, and will be called upon the arrival
+ * of the request response from the uPower firmware, telling if it succeeded
+ * or not.
+ *
+ * A callback may not be registered (NULL pointer), in which case polling has
+ * to be used to check the response, by calling upwr_req_status or
+ * upwr_poll_req_status, using UPWR_SG_DELAYM as the service group argument.
+ *
+ * The pass/fail result of the process monitor is read from uPower via
+ * the callback argument ret, or written to the variable pointed to by retptr,
+ * if polling is used (calls to upwr_req_status or upwr_poll_req_status).
+ * ret (or *retptr) also returns the data written on writes.
+ * The uPower firmware needs to support concurrent requests from M33 and A35.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ * Note that this is not the error response from the request itself:
+ * it only tells if the request was successfully sent to the uPower.
+ */
+int upwr_dlm_process_monitor(uint32_t chain_sel, upwr_callb callb);
+
+/**---------------------------------------------------------------
+ * DIAGNOSE SERVICE GROUP
+ */
+
+/**
+ * upwr_dgn_mode() - Sets the diagnostic mode.
+ * @mode: diagnostic mode, which can be:
+ * - UPWR_DGN_NONE: no diagnostic recorded
+ * - UPWR_DGN_TRACE: warnings, errors, service, internal activity recorded
+ * - UPWR_DGN_SRVREQ: warnings, errors, service activity recorded
+ * - UPWR_DGN_WARN: warnings and errors recorded
+ * - UPWR_DGN_ALL: trace, service, warnings, errors, task state recorded
+ * - UPWR_DGN_ERROR: only errors recorded
+ * - UPWR_DGN_ALL2ERR: record all until an error occurs,
+ * freeze recording on error
+ * - UPWR_DGN_ALL2HLT: record all until an error occurs,
+ * executes an ebreak on error, which halts the core if enabled through
+ * the debug interface
+ * @callb: pointer to the callback called when mode is changed.
+ * NULL if no callback is required.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok,
+ * -1 if service group is busy,
+ * -3 if called in an invalid API state
+ */
+int upwr_dgn_mode(upwr_dgn_mode_t mode, const upwr_callb callb);
+
+/**---------------------------------------------------------------
+ * AUXILIARY CALLS
+ */
+
+/**
+ * upwr_rom_version() - returns the ROM firmware version.
+ * @vmajor: pointer to the variable to get the firmware major version number.
+ * @vminor: pointer to the variable to get the firmware minor version number.
+ * @vfixes: pointer to the variable to get the firmware fixes number.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: SoC id.
+ */
+uint32_t upwr_rom_version(uint32_t *vmajor, uint32_t *vminor, uint32_t *vfixes);
+
+/**
+ * upwr_ram_version() - returns the RAM firmware version.
+ * @vminor: pointer to the variable to get the firmware minor version number.
+ * @vfixes: pointer to the variable to get the firmware fixes number.
+ *
+ * The 3 values returned are 0 if no RAM firmware was loaded and initialized.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: firmware major version number.
+ */
+uint32_t upwr_ram_version(uint32_t *vminor, uint32_t *vfixes);
+
+/**
+ * upwr_req_status() - tells the status of the service group request, and
+ * returns a request return value, if any.
+ * @sg: service group of the request
+ * @sgfptr: pointer to the variable that will hold the function id of
+ * the last request completed; can be NULL, in which case it is not used.
+ * @errptr: pointer to the variable that will hold the error code;
+ * can be NULL, in which case it is not used.
+ * @retptr: pointer to the variable that will hold the value returned
+ * by the last request completed (invalid if the last request completed didn't
+ * return any value); can be NULL, in which case it is not used.
+ * Note that a request may return a value even if service error is returned
+ * (*errptr != UPWR_RESP_OK): that is dependent on the specific service.
+ *
+ * This call can be used in a poll loop of a service request completion in case
+ * a callback was not registered.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: service request status: succeeded, failed, or ongoing (busy)
+ */
+
+/* service request status */
+typedef enum {
+ UPWR_REQ_OK, /* request succeeded */
+ UPWR_REQ_ERR, /* request failed */
+ UPWR_REQ_BUSY /* request execution ongoing */
+} upwr_req_status_t;
+
+upwr_req_status_t upwr_req_status(upwr_sg_t sg,
+ uint32_t *sgfptr,
+ upwr_resp_t *errptr,
+ int *retptr);
+
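+/*
+ * Illustrative polling sketch (not part of the API): after issuing a request
+ * without a callback, a caller can spin on upwr_req_status() until the
+ * request leaves the busy state:
+ *
+ *	upwr_resp_t err;
+ *	int ret;
+ *	upwr_req_status_t sts;
+ *
+ *	do {
+ *		sts = upwr_req_status(UPWR_SG_PWRMGMT, NULL, &err, &ret);
+ *	} while (sts == UPWR_REQ_BUSY);
+ */
+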
+/**
+ * upwr_poll_req_status() - polls the status of the service group request, and
+ * returns a request return value, if any.
+ * @sg: service group of the request
+ * @sgfptr: pointer to the variable that will hold the function id of
+ * the last request completed; can be NULL, in which case it is not used.
+ * @errptr: pointer to the variable that will hold the error code;
+ * can be NULL, in which case it is not used.
+ * @retptr: pointer to the variable that will hold the value returned
+ * by the last request completed (invalid if the last request completed didn't
+ * return any value); can be NULL, in which case it is not used.
+ * Note that a request may return a value even if service error is returned
+ * (*errptr != UPWR_RESP_OK): that is dependent on the specific service.
+ * @attempts: maximum number of polling attempts; if attempts > 0 and is
+ * reached with no service response received, upwr_poll_req_status returns
+ * UPWR_REQ_BUSY and variables pointed by sgfptr, retptr and errptr are not
+ * updated; if attempts = 0, upwr_poll_req_status waits "forever".
+ *
+ * This call can be used to poll a service request completion in case a
+ * callback was not registered.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: service request status: succeeded, failed, or ongoing (busy)
+ */
+upwr_req_status_t upwr_poll_req_status(upwr_sg_t sg,
+ uint32_t *sgfptr,
+ upwr_resp_t *errptr,
+ int *retptr,
+ uint32_t attempts);
+
+/**
+ * upwr_alarm_code() - returns the alarm code of the last alarm occurrence.
+ *
+ * The value returned is not meaningful if no alarm was issued by uPower.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: alarm code, as defined by the type upwr_alarm_t in upwr_soc_defines.h
+ */
+upwr_alarm_t upwr_alarm_code(void);
+
+/**---------------------------------------------------------------
+ * TRANSMIT/RECEIVE PRIMITIVES
+ * ---------------------------------------------------------------
+ */
+
+typedef void (*UPWR_TX_CALLB_FUNC_T)(void);
+typedef void (*UPWR_RX_CALLB_FUNC_T)(void);
+
+/**
+ * upwr_tx() - queues a message for transmission.
+ * @msg : pointer to the message sent.
+ * @size: message size in 32-bit words
+ * @callback: pointer to a function to be called when transmission done;
+ * can be NULL, in which case no callback is done.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: number of vacant positions left in the transmission queue, or
+ * -1 if the queue was already full when upwr_tx was called, or
+ * -2 if any argument is invalid (like size off-range)
+ */
+int upwr_tx(const uint32_t *msg, unsigned int size,
+ UPWR_TX_CALLB_FUNC_T callback);
+
+/**
+ * upwr_rx() - unqueues a received message from the reception queue.
+ * @msg: pointer to the message destination buffer.
+ * @size: pointer to variable to hold message size in 32-bit words.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: number of messages remaining in the reception queue, or
+ * -1 if the queue was already empty when upwr_rx was called, or
+ * -2 if any argument is invalid (like mu off-range)
+ */
+int upwr_rx(char *msg, unsigned int *size);
+
+/**
+ * upwr_rx_callback() - sets up a callback for a message receiving event.
+ * @callback: pointer to a function to be called when a message arrives;
+ * can be NULL, in which case no callback is done.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: 0 if ok; -2 if any argument is invalid (mu off-range).
+ */
+int upwr_rx_callback(UPWR_RX_CALLB_FUNC_T callback);
+
+/**
+ * msg_copy() - copies a message.
+ * @dest: pointer to the destination message.
+ * @src : pointer to the source message.
+ * @size: message size in words.
+ *
+ * This is an auxiliary function used by the rest of the API calls.
+ * It is normally not called by the driver code, unless maybe for test purposes.
+ *
+ * Context: no sleep, no locks taken/released.
+ * Return: none (void)
+ */
+void msg_copy(char *dest, char *src, unsigned int size);
+
+#endif /* UPWR_API_H */
diff --git a/plat/imx/imx8ulp/upower/upower_defs.h b/plat/imx/imx8ulp/upower/upower_defs.h
new file mode 100644
index 0000000000..118d7e0c34
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upower_defs.h
@@ -0,0 +1,742 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/**
+ * Copyright 2019-2024 NXP
+ *
+ * KEYWORDS: micro-power uPower driver API
+ * -----------------------------------------------------------------------------
+ * PURPOSE: uPower driver API #defines and typedefs shared with the firmware
+ * -----------------------------------------------------------------------------
+ * PARAMETERS:
+ * PARAM NAME RANGE:DESCRIPTION: DEFAULTS: UNITS
+ * -----------------------------------------------------------------------------
+ * REUSE ISSUES: no reuse issues
+ */
+
+#ifndef UPWR_DEFS_H
+#define UPWR_DEFS_H
+
+#include <stdint.h>
+
+#ifndef UPWR_PMC_SWT_WORDS
+#define UPWR_PMC_SWT_WORDS (1U)
+#endif
+
+#ifndef UPWR_PMC_MEM_WORDS
+#define UPWR_PMC_MEM_WORDS (2U)
+#endif
+
+/* ****************************************************************************
+ * DOWNSTREAM MESSAGES - COMMANDS/FUNCTIONS
+ * ****************************************************************************
+ */
+#define UPWR_SRVGROUP_BITS (4U)
+#define UPWR_FUNCTION_BITS (4U)
+#define UPWR_PWDOMAIN_BITS (4U)
+#define UPWR_HEADER_BITS \
+ (UPWR_SRVGROUP_BITS + UPWR_FUNCTION_BITS + UPWR_PWDOMAIN_BITS)
+#define UPWR_ARG_BITS (32U - UPWR_HEADER_BITS)
+#if ((UPWR_ARG_BITS & 1U) > 0U)
+#error "UPWR_ARG_BITS must be an even number"
+#endif
+#define UPWR_ARG64_BITS (64U - UPWR_HEADER_BITS)
+#define UPWR_HALF_ARG_BITS (UPWR_ARG_BITS >> 1U)
+#define UPWR_DUAL_OFFSET_BITS ((UPWR_ARG_BITS + 32U) >> 1U)
+
+/*
+ * message header: header fields common to all downstream messages.
+ */
+struct upwr_msg_hdr {
+ uint32_t domain : UPWR_PWDOMAIN_BITS; /* power domain */
+ uint32_t srvgrp : UPWR_SRVGROUP_BITS; /* service group */
+ uint32_t function : UPWR_FUNCTION_BITS; /* function */
+ uint32_t arg : UPWR_ARG_BITS; /* function-specific argument */
+};
+
+/* generic 1-word downstream message format */
+typedef union {
+ struct upwr_msg_hdr hdr;
+ uint32_t word; /* message first word */
+} upwr_down_1w_msg;
+
+/* generic 2-word downstream message format */
+typedef struct {
+ struct upwr_msg_hdr hdr;
+ uint32_t word2; /* message second word */
+} upwr_down_2w_msg;
+
+/* message format for functions that receive a pointer/offset */
+typedef struct {
+ struct upwr_msg_hdr hdr;
+ uint32_t ptr; /* config struct offset */
+} upwr_pointer_msg;
+
+/* message format for functions that receive 2 pointers/offsets */
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint64_t rsv : UPWR_HEADER_BITS;
+ uint64_t ptr0 : UPWR_DUAL_OFFSET_BITS;
+ uint64_t ptr1 : UPWR_DUAL_OFFSET_BITS;
+ } ptrs;
+} upwr_2pointer_msg;
+
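+/*
+ * Illustrative sketch, not part of the API: filling the common header of a
+ * generic 1-word downstream message. The field values are placeholders;
+ * service group 1 is power management per the defines below.
+ *
+ *	upwr_down_1w_msg msg;
+ *
+ *	msg.hdr.domain   = 1U;	// requesting power domain
+ *	msg.hdr.srvgrp   = 1U;	// UPWR_SG_PWRMGMT
+ *	msg.hdr.function = 3U;	// UPWR_PWM_PWR_ON
+ *	msg.hdr.arg      = 0U;	// function-specific argument
+ *	// msg.word now holds the first 32-bit word to be transmitted
+ */
+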
+#define UPWR_SG_EXCEPT (0U) /* 0 = exception */
+#define UPWR_SG_PWRMGMT (1U) /* 1 = power management */
+#define UPWR_SG_DELAYM (2U) /* 2 = delay measurement */
+#define UPWR_SG_VOLTM (3U) /* 3 = voltage measurement */
+#define UPWR_SG_CURRM (4U) /* 4 = current measurement */
+#define UPWR_SG_TEMPM (5U) /* 5 = temperature measurement */
+#define UPWR_SG_DIAG (6U) /* 6 = diagnostic */
+#define UPWR_SG_COUNT (7U)
+
+typedef uint32_t upwr_sg_t;
+
+/* *************************************************************************
+ * Initialization - downstream
+ ***************************************************************************/
+typedef upwr_down_1w_msg upwr_start_msg; /* start command message */
+typedef upwr_down_1w_msg upwr_power_on_msg; /* power on command message */
+typedef upwr_down_1w_msg upwr_boot_start_msg; /* boot start command message */
+typedef union {
+ struct upwr_msg_hdr hdr;
+ upwr_power_on_msg power_on;
+ upwr_boot_start_msg boot_start;
+ upwr_start_msg start;
+} upwr_startup_down_msg;
+
+/* *************************************************************************
+ * Service Group EXCEPTION - downstream
+ ***************************************************************************/
+
+#define UPWR_XCP_INIT (0U) /* 0 = init msg (not a service request itself) */
+#define UPWR_XCP_PING (0U) /* 0 = also ping request, since its response is an init msg */
+#define UPWR_XCP_START (1U) /* 1 = service start: upwr_start (not a service request itself) */
+#define UPWR_XCP_SHUTDOWN (2U) /* 2 = service shutdown: upwr_xcp_shutdown */
+#define UPWR_XCP_CONFIG (3U) /* 3 = uPower configuration: upwr_xcp_config */
+#define UPWR_XCP_SW_ALARM (4U) /* 4 = uPower software alarm: upwr_xcp_sw_alarm */
+#define UPWR_XCP_I2C (5U) /* 5 = I2C access: upwr_xcp_i2c_access */
+#define UPWR_XCP_SPARE_6 (6U) /* 6 = spare */
+#define UPWR_XCP_SET_DDR_RETN (7U) /* 7 = set/clear ddr retention */
+#define UPWR_XCP_SET_RTD_APD_LLWU (8U) /* 8 = set/clear rtd/apd llwu */
+#define UPWR_XCP_SPARE_8 (8U) /* 8 = spare */
+#define UPWR_XCP_SET_RTD_USE_DDR (9U) /* 9 = M33 core set it is using DDR or not */
+#define UPWR_XCP_SPARE_9 (9U) /* 9 = spare */
+#define UPWR_XCP_SPARE_10 (10U) /* 10 = spare */
+#define UPWR_XCP_SET_MIPI_DSI_ENA (10U) /* 10 = set/clear mipi dsi ena */
+#define UPWR_XCP_SPARE_11 (11U) /* 11 = spare */
+#define UPWR_XCP_GET_MIPI_DSI_ENA (11U) /* 11 = get mipi dsi ena status */
+#define UPWR_XCP_SPARE_12 (12U) /* 12 = spare */
+#define UPWR_XCP_SET_OSC_MODE (12U) /* 12 = set uPower OSC mode, high or low */
+#define UPWR_XCP_SPARE_13 (13U) /* 13 = spare */
+#define UPWR_XCP_SPARE_14 (14U) /* 14 = spare */
+#define UPWR_XCP_SPARE_15 (15U) /* 15 = spare */
+#define UPWR_XCP_F_COUNT (16U)
+
+typedef uint32_t upwr_xcp_f_t;
+typedef upwr_down_1w_msg upwr_xcp_ping_msg;
+typedef upwr_down_1w_msg upwr_xcp_shutdown_msg;
+typedef upwr_power_on_msg upwr_xcp_power_on_msg;
+typedef upwr_boot_start_msg upwr_xcp_boot_start_msg;
+typedef upwr_start_msg upwr_xcp_start_msg;
+typedef upwr_down_2w_msg upwr_xcp_config_msg;
+typedef upwr_down_1w_msg upwr_xcp_swalarm_msg;
+typedef upwr_down_1w_msg upwr_xcp_ddr_retn_msg;
+typedef upwr_down_1w_msg upwr_xcp_set_mipi_dsi_ena_msg;
+typedef upwr_down_1w_msg upwr_xcp_get_mipi_dsi_ena_msg;
+typedef upwr_down_1w_msg upwr_xcp_rtd_use_ddr_msg;
+typedef upwr_down_1w_msg upwr_xcp_rtd_apd_llwu_msg;
+typedef upwr_down_1w_msg upwr_xcp_set_osc_mode_msg;
+typedef upwr_pointer_msg upwr_xcp_i2c_msg;
+
+ /* structure pointed by message upwr_xcp_i2c_msg */
+typedef struct {
+ uint16_t addr;
+ int8_t data_size;
+ uint8_t subaddr_size;
+ uint32_t subaddr;
+ uint32_t data;
+} upwr_i2c_access;
+
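+/*
+ * Illustrative fill (not part of the API), mirroring the PMIC accesses done
+ * in upower_hal.c: a 1-byte write of value 0x5A to sub-address 0x10 of the
+ * I2C device at address 0x32 (a negative data_size requests a read instead).
+ *
+ *	upwr_i2c_access i2c = {
+ *		.addr         = 0x32U,
+ *		.data_size    = 1,
+ *		.subaddr_size = 1U,
+ *		.subaddr      = 0x10U,
+ *		.data         = 0x5AU,
+ *	};
+ */
+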
+/* Exception all messages */
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ upwr_xcp_ping_msg ping; /* ping */
+ upwr_xcp_start_msg start; /* service start */
+ upwr_xcp_shutdown_msg shutdown; /* shutdown */
+ upwr_xcp_boot_start_msg bootstart; /* boot start */
+ upwr_xcp_config_msg config; /* uPower configuration */
+ upwr_xcp_swalarm_msg swalarm; /* software alarm */
+ upwr_xcp_i2c_msg i2c; /* I2C access */
+ upwr_xcp_ddr_retn_msg set_ddr_retn; /* set ddr retention msg */
+ upwr_xcp_set_mipi_dsi_ena_msg set_mipi_dsi_ena; /* set mipi dsi ena msg */
+ upwr_xcp_get_mipi_dsi_ena_msg get_mipi_dsi_ena; /* get mipi dsi ena msg */
+ upwr_xcp_rtd_use_ddr_msg set_rtd_use_ddr; /* set rtd is using ddr msg */
+ upwr_xcp_rtd_apd_llwu_msg set_llwu; /* set rtd/apd llwu msg */
+ upwr_xcp_set_osc_mode_msg set_osc_mode; /* set osc_mode msg */
+} upwr_xcp_msg;
+
+/* structure pointed by message upwr_volt_dva_req_id_msg */
+typedef struct {
+ uint32_t id_word0;
+ uint32_t id_word1;
+ uint32_t mode;
+} upwr_dva_id_struct;
+
+/**
+ * PMIC voltage accuracy is 12.5 mV, 12500 uV
+ */
+#define PMIC_VOLTAGE_MIN_STEP 12500U
+
+/* *************************************************************************
+ * Service Group POWER MANAGEMENT - downstream
+ ***************************************************************************/
+
+#define UPWR_PWM_REGCFG (0U) /* 0 = regulator config: upwr_pwm_reg_config */
+#define UPWR_PWM_DEVMODE (0U) /* deprecated, for old compile */
+#define UPWR_PWM_VOLT (1U) /* 1 = voltage change: upwr_pwm_chng_reg_voltage */
+#define UPWR_PWM_SWITCH (2U) /* 2 = switch control: upwr_pwm_chng_switch_mem */
+#define UPWR_PWM_PWR_ON (3U) /* 3 = switch/RAM/ROM power on: upwr_pwm_power_on */
+#define UPWR_PWM_PWR_OFF (4U) /* 4 = switch/RAM/ROM power off: upwr_pwm_power_off */
+#define UPWR_PWM_RETAIN (5U) /* 5 = retain memory array: upwr_pwm_mem_retain */
+#define UPWR_PWM_DOM_BIAS (6U) /* 6 = Domain bias control: upwr_pwm_chng_dom_bias */
+#define UPWR_PWM_MEM_BIAS (7U) /* 7 = Memory bias control: upwr_pwm_chng_mem_bias */
+#define UPWR_PWM_PMICCFG (8U) /* 8 = PMIC configuration: upwr_pwm_pmic_config */
+#define UPWR_PWM_PMICMOD (8U) /* deprecated, for old compile */
+#define UPWR_PWM_PES (9U) /* 9 = reserved, no use so far */
+#define UPWR_PWM_CONFIG (10U) /* 10 = apply power mode defined configuration */
+#define UPWR_PWM_CFGPTR (11U) /* 11 = configuration pointer */
+#define UPWR_PWM_DOM_PWRON (12U) /* 12 = domain power on: upwr_pwm_dom_power_on */
+#define UPWR_PWM_BOOT (13U) /* 13 = boot start: upwr_pwm_boot_start */
+#define UPWR_PWM_FREQ (14U) /* 14 = domain frequency setup */
+#define UPWR_PWM_PARAM (15U) /* 15 = power management parameters */
+#define UPWR_PWM_F_COUNT (16U)
+
+typedef uint32_t upwr_pwm_f_t;
+
+#define MAX_PMETER_SSEL 7U
+
+#define UPWR_VTM_CHNG_PMIC_RAIL_VOLT (0U) /* 0 = change pmic rail voltage */
+#define UPWR_VTM_GET_PMIC_RAIL_VOLT (1U) /* 1 = get pmic rail voltage */
+#define UPWR_VTM_PMIC_CONFIG (2U) /* 2 = configure PMIC IC */
+#define UPWR_VTM_DVA_DUMP_INFO (3U) /* 3 = dump dva information */
+#define UPWR_VTM_DVA_REQ_ID (4U) /* 4 = dva request ID array */
+#define UPWR_VTM_DVA_REQ_DOMAIN (5U) /* 5 = dva request domain */
+#define UPWR_VTM_DVA_REQ_SOC (6U) /* 6 = dva request the whole SOC */
+#define UPWR_VTM_PMETER_MEAS (7U) /* 7 = pmeter measure */
+#define UPWR_VTM_VMETER_MEAS (8U) /* 8 = vmeter measure */
+#define UPWR_VTM_PMIC_COLD_RESET (9U) /* 9 = pmic cold reset */
+#define UPWR_VTM_SET_DVFS_PMIC_RAIL (10U) /* 10 = set which domain uses which pmic rail, for DVFS use */
+#define UPWR_VTM_SET_PMIC_MODE (11U) /* 11 = set pmic mode */
+#define UPWR_VTM_F_COUNT (16U)
+
+typedef uint32_t upwr_volt_f_t;
+
+#define VMETER_SEL_RTD 0U
+#define VMETER_SEL_LDO 1U
+#define VMETER_SEL_APD 2U
+#define VMETER_SEL_AVD 3U
+#define VMETER_SEL_MAX 3U
+
+/**
+ * The total TSEL count is 256
+ */
+#define MAX_TEMP_TSEL 256U
+
+/**
+ * 3 temperature sensors are supported: sensor 0, 1, 2
+ */
+#define MAX_TEMP_SENSOR 2U
+
+#define UPWR_TEMP_GET_CUR_TEMP (0U) /* 0 = get current temperature */
+#define UPWR_TEMP_F_COUNT (1U)
+typedef uint32_t upwr_temp_f_t;
+
+#define UPWR_DMETER_GET_DELAY_MARGIN (0U) /* 0 = get delay margin */
+#define UPWR_DMETER_SET_DELAY_MARGIN (1U) /* 1 = set delay margin */
+#define UPWR_PMON_REQ (2U) /* 2 = process monitor service */
+#define UPWR_DMETER_F_COUNT (3U)
+
+typedef uint32_t upwr_dmeter_f_t;
+
+typedef upwr_down_1w_msg upwr_volt_pmeter_meas_msg;
+typedef upwr_down_1w_msg upwr_volt_pmic_set_mode_msg;
+typedef upwr_down_1w_msg upwr_volt_vmeter_meas_msg;
+
+struct upwr_reg_config_t {
+ uint32_t reg;
+};
+
+ /* set of 32 switches */
+struct upwr_switch_board_t {
+ uint32_t on; /* Switch on state,1 bit per instance */
+ uint32_t mask; /* actuation mask, 1 bit per instance */
+};
+
+ /* set of 32 RAM/ROM switches */
+struct upwr_mem_switches_t {
+ uint32_t array; /* RAM/ROM array state, 1 bit per instance */
+ uint32_t perif; /* RAM/ROM peripheral state, 1 bit per instance */
+ uint32_t mask; /* actuation mask, 1 bit per instance */
+};
+
+typedef upwr_down_1w_msg upwr_pwm_dom_pwron_msg; /* domain power on message */
+typedef upwr_down_1w_msg upwr_pwm_boot_start_msg; /* boot start message */
+
+/* functions with complex arguments use the pointer message formats: */
+typedef upwr_pointer_msg upwr_pwm_retain_msg;
+typedef upwr_pointer_msg upwr_pwm_pmode_cfg_msg;
+
+/* service upwr_pwm_chng_dom_bias message argument fields */
+#define UPWR_DOMBIAS_MODE_BITS (2U)
+#define UPWR_DOMBIAS_RBB_BITS (8U)
+#define UPWR_DOMBIAS_RSV_BITS (14U)
+#define UPWR_DOMBIAS_ARG_BITS (UPWR_DOMBIAS_RSV_BITS + \
+				(2U * UPWR_DOMBIAS_MODE_BITS) + \
+				(4U * UPWR_DOMBIAS_RBB_BITS) + 2U)
+
+/*
+ * The size check must follow the definitions above; otherwise the still
+ * undefined UPWR_DOMBIAS_ARG_BITS would silently evaluate to 0 in the #if.
+ */
+#if (UPWR_ARG_BITS < UPWR_DOMBIAS_ARG_BITS)
+#if ((UPWR_ARG_BITS + 32) < UPWR_DOMBIAS_ARG_BITS)
+#error "too few message bits for domain bias argument"
+#endif
+#endif
+/*
+ * upwr_pwm_dom_bias_args is an SoC-dependent message.
+ */
+typedef struct {
+ uint32_t: 12U; /* TODO: find a way to use UPWR_HEADER_BITS */
+ uint32_t dommode : UPWR_DOMBIAS_MODE_BITS;
+ uint32_t avdmode : UPWR_DOMBIAS_MODE_BITS;
+ uint32_t domapply : 1U;
+ uint32_t avdapply : 1U;
+ uint32_t rsv : UPWR_DOMBIAS_RSV_BITS;
+ uint32_t domrbbn : UPWR_DOMBIAS_RBB_BITS; /* RTD/APD back bias N-well */
+ uint32_t domrbbp : UPWR_DOMBIAS_RBB_BITS; /* RTD/APD back bias P-well */
+ uint32_t avdrbbn : UPWR_DOMBIAS_RBB_BITS; /* AVD back bias N-well */
+ uint32_t avdrbbp : UPWR_DOMBIAS_RBB_BITS; /* AVD back bias P-well */
+} upwr_pwm_dom_bias_args;
+
+
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ struct {
+ upwr_pwm_dom_bias_args B;
+ } args;
+} upwr_pwm_dom_bias_msg;
+
+/* service upwr_pwm_chng_mem_bias message argument fields */
+/*
+ * upwr_pwm_mem_bias_args is an SoC-dependent message,
+ * defined in upower_soc_defs.h
+ */
+typedef struct {
+ uint32_t: 12U; /* TODO: find a way to use UPWR_HEADER_BITS */
+ uint32_t en : 1U;
+ uint32_t rsv : 19U;
+} upwr_pwm_mem_bias_args;
+
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ struct {
+ upwr_pwm_mem_bias_args B;
+ } args;
+} upwr_pwm_mem_bias_msg;
+
+typedef upwr_pointer_msg upwr_pwm_pes_seq_msg;
+
+/* upwr_pwm_reg_config-specific message format */
+typedef upwr_pointer_msg upwr_pwm_regcfg_msg;
+
+/* upwr_volt_pmic_volt-specific message format */
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t domain : 8U;
+ uint32_t rail : 8U;
+ } args;
+} upwr_volt_dom_pmic_rail_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t rail : 4U; /* pmic rail id */
+ uint32_t volt : 12U; /* voltage value, accurate to mV, support 0~3.3V */
+ } args;
+} upwr_volt_pmic_set_volt_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t rail : 16U; /* pmic rail id */
+ } args;
+} upwr_volt_pmic_get_volt_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv :UPWR_HEADER_BITS;
+ uint32_t domain : 8U;
+ uint32_t mode : 8U; /* work mode */
+ } args;
+} upwr_volt_dva_req_domain_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t mode : 16U; /* work mode */
+ } args;
+} upwr_volt_dva_req_soc_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t addr_offset : 16U; /* addr_offset to 0x28330000 */
+ } args;
+} upwr_volt_dva_dump_info_msg;
+
+typedef upwr_pointer_msg upwr_volt_pmiccfg_msg;
+typedef upwr_pointer_msg upwr_volt_dva_req_id_msg;
+typedef upwr_down_1w_msg upwr_volt_pmic_cold_reset_msg;
+
+/* upwr_pwm_volt-specific message format */
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t reg : UPWR_HALF_ARG_BITS; /* regulator id */
+ uint32_t volt : UPWR_HALF_ARG_BITS; /* voltage value */
+ } args;
+} upwr_pwm_volt_msg;
+
+/* upwr_pwm_freq_setup-specific message format */
+/**
+ * DVA adjust stage
+ */
+#define DVA_ADJUST_STAGE_INVALID 0U
+/* first stage, gross adjust, for increase frequency use */
+#define DVA_ADJUST_STAGE_ONE 1U
+/* second stage, fine adjust for increase frequency use */
+#define DVA_ADJUST_STAGE_TWO 2U
+/* combined first + second stage, for frequency decrease use */
+#define DVA_ADJUST_STAGE_FULL 3U
+
+/**
+ * This message structure is used for the DVFS feature.
+ * 1. Because the user may use a different PMIC or a different board,
+ * the PMIC regulator supplying RTD/APD may change, so the user needs
+ * to tell uPower the regulator number. The number must match the PMIC
+ * IC and the board. 4 bits are used for the pmic regulator, supporting
+ * up to 16 regulators.
+ *
+ * 2. 2 bits are used for the DVA stage.
+ *
+ * 3. 10 bits are used for the target frequency, accurate to MHz, supporting
+ * up to 1024 MHz.
+ */
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t rail : 4; /* pmic regulator */
+ uint32_t stage : 2; /* DVA stage */
+ uint32_t target_freq : 10; /* target frequency */
+ } args;
+} upwr_pwm_freq_msg;
+
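+/*
+ * Illustrative sketch, not part of the API: requesting 500 MHz on PMIC rail 2
+ * using the first (gross) DVA adjust stage. The values are placeholders.
+ *
+ *	upwr_pwm_freq_msg msg = {0};
+ *
+ *	msg.args.rail        = 2;			// pmic regulator id
+ *	msg.args.stage       = DVA_ADJUST_STAGE_ONE;	// gross adjust
+ *	msg.args.target_freq = 500;			// MHz, fits in 10 bits
+ */
+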
+typedef upwr_down_2w_msg upwr_pwm_param_msg;
+
+/* upwr_pwm_pmiccfg-specific message format */
+typedef upwr_pointer_msg upwr_pwm_pmiccfg_msg;
+
+/* functions that pass a pointer use message format upwr_pointer_msg */
+typedef upwr_pointer_msg upwr_pwm_cfgptr_msg;
+
+/* functions that pass 2 pointers use message format upwr_2pointer_msg
+ */
+typedef upwr_2pointer_msg upwr_pwm_switch_msg;
+typedef upwr_2pointer_msg upwr_pwm_pwron_msg;
+typedef upwr_2pointer_msg upwr_pwm_pwroff_msg;
+
+/* Power Management all messages */
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ upwr_pwm_param_msg param; /* power management parameters */
+ upwr_pwm_dom_bias_msg dom_bias; /* domain bias message */
+ upwr_pwm_mem_bias_msg mem_bias; /* memory bias message */
+ upwr_pwm_pes_seq_msg pes; /* PE seq. message */
+ upwr_pwm_pmode_cfg_msg pmode; /* power mode config message */
+ upwr_pwm_regcfg_msg regcfg; /* regulator config message */
+ upwr_pwm_volt_msg volt; /* set voltage message */
+ upwr_pwm_freq_msg freq; /* set frequency message */
+ upwr_pwm_switch_msg switches; /* switch control message */
+ upwr_pwm_pwron_msg pwron; /* switch/RAM/ROM power on message */
+ upwr_pwm_pwroff_msg pwroff; /* switch/RAM/ROM power off message */
+ upwr_pwm_retain_msg retain; /* memory retain message */
+ upwr_pwm_cfgptr_msg cfgptr; /* configuration pointer message*/
+ upwr_pwm_dom_pwron_msg dompwron; /* domain power on message */
+ upwr_pwm_boot_start_msg boot; /* boot start message */
+} upwr_pwm_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ upwr_volt_pmic_set_volt_msg set_pmic_volt; /* set pmic voltage message */
+	upwr_volt_pmic_get_volt_msg get_pmic_volt; /* get pmic voltage message */
+ upwr_volt_pmic_set_mode_msg set_pmic_mode; /* set pmic mode message */
+ upwr_volt_pmiccfg_msg pmiccfg; /* PMIC configuration message */
+	upwr_volt_dom_pmic_rail_msg dom_pmic_rail; /* set domain pmic rail message */
+ upwr_volt_dva_dump_info_msg dva_dump_info; /* dump dva info message */
+ upwr_volt_dva_req_id_msg dva_req_id; /* dump dva request id array message */
+ upwr_volt_dva_req_domain_msg dva_req_domain; /* dump dva request domain message */
+ upwr_volt_dva_req_soc_msg dva_req_soc; /* dump dva request whole soc message */
+ upwr_volt_pmeter_meas_msg pmeter_meas_msg; /* pmeter measure message */
+ upwr_volt_vmeter_meas_msg vmeter_meas_msg; /* vmeter measure message */
+ upwr_volt_pmic_cold_reset_msg cold_reset_msg; /* pmic cold reset message */
+} upwr_volt_msg;
+
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t sensor_id : 16U; /* temperature sensor id */
+ } args;
+} upwr_temp_get_cur_temp_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t index : 8U; /* the delay meter index */
+ uint32_t path : 8U; /* the critical path number */
+ } args;
+} upwr_dmeter_get_delay_margin_msg;
+
+#define MAX_DELAY_MARGIN 63U
+#define MAX_DELAY_CRITICAL_PATH 7U
+#define MAX_DELAY_METER_NUM 1U
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t index: 4U; /* the delay meter index */
+ uint32_t path: 4U; /* the critical path number */
+ uint32_t dm: 8U; /* the delay margin value of delay meter */
+ } args;
+} upwr_dmeter_set_delay_margin_msg;
+
+#define MAX_PMON_CHAIN_SEL 1U
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_HEADER_BITS;
+ uint32_t chain_sel : 16U; /* the process monitor delay chain sel */
+ } args;
+} upwr_pmon_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ upwr_temp_get_cur_temp_msg get_temp_msg; /* get current temperature message */
+} upwr_temp_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr; /* message header */
+ upwr_dmeter_get_delay_margin_msg get_margin_msg; /* get delay margin message */
+ upwr_dmeter_set_delay_margin_msg set_margin_msg; /* set delay margin message */
+ upwr_pmon_msg pmon_msg; /* process monitor message */
+} upwr_dmeter_msg;
+
+typedef upwr_down_2w_msg upwr_down_max_msg; /* longest downstream msg */
+
+/*
+ * upwr_dom_bias_cfg_t and upwr_mem_bias_cfg_t are SoC-dependent structs,
+ * defined in upower_soc_defs.h
+ */
+/* Power and mem switches */
+typedef struct {
+ volatile struct upwr_switch_board_t swt_board[UPWR_PMC_SWT_WORDS];
+ volatile struct upwr_mem_switches_t swt_mem[UPWR_PMC_MEM_WORDS];
+} swt_config_t;
+
+/* *************************************************************************
+ * Service Group DIAGNOSE - downstream
+ ***************************************************************************/
+/* Diagnose Functions */
+#define UPWR_DGN_MODE (0U) /* 0 = diagnose mode: upwr_dgn_mode */
+#define UPWR_DGN_F_COUNT (1U)
+#define UPWR_DGN_BUFFER_EN (2U)
+typedef uint32_t upwr_dgn_f_t;
+
+#define UPWR_DGN_ALL2ERR (0U) /* record all until an error occurs, freeze recording on error */
+#define UPWR_DGN_ALL2HLT (1U) /* record all until an error occurs, halt core on error */
+#define UPWR_DGN_ALL (2U) /* trace, warnings, errors, task state recorded */
+#define UPWR_DGN_MAX UPWR_DGN_ALL
+#define UPWR_DGN_TRACE (3U) /* trace, warnings, errors recorded */
+#define UPWR_DGN_SRVREQ (4U) /* service request activity recorded */
+#define UPWR_DGN_WARN (5U) /* warnings and errors recorded */
+#define UPWR_DGN_ERROR (6U) /* only errors recorded */
+#define UPWR_DGN_NONE (7U) /* no diagnostic recorded */
+#define UPWR_DGN_COUNT (8U)
+typedef uint32_t upwr_dgn_mode_t;
+
+typedef upwr_down_1w_msg upwr_dgn_mode_msg;
+
+typedef union {
+ struct upwr_msg_hdr hdr;
+ upwr_dgn_mode_msg mode_msg;
+} upwr_dgn_msg;
+
+typedef struct {
+ struct upwr_msg_hdr hdr;
+ uint32_t buf_addr;
+} upwr_dgn_v2_msg;
+
+/* diagnostics log types in the shared RAM log buffer */
+
+typedef enum {
+ DGN_LOG_NONE = 0x00000000,
+ DGN_LOG_INFO = 0x10000000,
+ DGN_LOG_ERROR = 0x20000000,
+ DGN_LOG_ASSERT = 0x30000000,
+ DGN_LOG_EXCEPT = 0x40000000,
+ DGN_LOG_EVENT = 0x50000000, // old event trace
+ DGN_LOG_EVENTNEW = 0x60000000, // new event trace
+ DGN_LOG_SERVICE = 0x70000000,
+ DGN_LOG_TASKDEF = 0x80000000,
+ DGN_LOG_TASKEXE = 0x90000000,
+ DGN_LOG_MUTEX = 0xA0000000,
+ DGN_LOG_SEMAPH = 0xB0000000,
+ DGN_LOG_TIMER = 0xC0000000,
+ DGN_LOG_CALLTRACE = 0xD0000000,
+ DGN_LOG_DATA = 0xE0000000,
+ DGN_LOG_PCTRACE = 0xF0000000
+} upwr_dgn_log_t;
+
+/* ****************************************************************************
+ * UPSTREAM MESSAGES - RESPONSES
+ * ****************************************************************************
+ */
+/* generic ok/ko response message */
+#define UPWR_RESP_ERR_BITS (4U)
+#define UPWR_RESP_HDR_BITS (UPWR_RESP_ERR_BITS+\
+ UPWR_SRVGROUP_BITS+UPWR_FUNCTION_BITS)
+#define UPWR_RESP_RET_BITS (32U - UPWR_RESP_HDR_BITS)
+
+#define UPWR_RESP_OK (0U) /* no error */
+#define UPWR_RESP_SG_BUSY (1U) /* service group is busy */
+#define UPWR_RESP_SHUTDOWN (2U) /* services not up or shutting down */
+#define UPWR_RESP_BAD_REQ (3U) /* invalid request */
+#define UPWR_RESP_BAD_STATE (4U) /* system state doesn't allow performing the request */
+#define UPWR_RESP_UNINSTALLD (5U) /* service or function not installed */
+#define UPWR_RESP_UNINSTALLED (5U) /* service or function not installed (alias) */
+#define UPWR_RESP_RESOURCE (6U) /* resource not available */
+#define UPWR_RESP_TIMEOUT (7U) /* service timeout */
+#define UPWR_RESP_COUNT (8U)
+
+typedef uint32_t upwr_resp_t;
+
+struct upwr_resp_hdr {
+ uint32_t errcode : UPWR_RESP_ERR_BITS;
+ uint32_t srvgrp : UPWR_SRVGROUP_BITS; /* service group */
+ uint32_t function: UPWR_FUNCTION_BITS;
+ uint32_t ret : UPWR_RESP_RET_BITS; /* return value, if any */
+};
+
+/* generic 1-word upstream message format */
+typedef union {
+ struct upwr_resp_hdr hdr;
+ uint32_t word;
+} upwr_resp_msg;
+
+/* generic 2-word upstream message format */
+typedef struct {
+ struct upwr_resp_hdr hdr;
+ uint32_t word2; /* message second word */
+} upwr_up_2w_msg;
+
+typedef upwr_up_2w_msg upwr_up_max_msg;
+
+/* *************************************************************************
+ * Exception/Initialization - upstream
+ ***************************************************************************/
+#define UPWR_SOC_BITS (7U)
+#define UPWR_VMINOR_BITS (4U)
+#define UPWR_VFIXES_BITS (4U)
+#define UPWR_VMAJOR_BITS \
+ (32U - UPWR_HEADER_BITS - UPWR_SOC_BITS - UPWR_VMINOR_BITS - UPWR_VFIXES_BITS)
+
+typedef struct {
+ uint32_t soc_id;
+ uint32_t vmajor;
+ uint32_t vminor;
+ uint32_t vfixes;
+} upwr_code_vers_t;
+
+/* message sent by firmware initialization, received by upwr_init */
+typedef union {
+ struct upwr_resp_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_RESP_HDR_BITS;
+ uint32_t soc : UPWR_SOC_BITS; /* SoC identification */
+ uint32_t vmajor : UPWR_VMAJOR_BITS; /* firmware major version */
+ uint32_t vminor : UPWR_VMINOR_BITS; /* firmware minor version */
+ uint32_t vfixes : UPWR_VFIXES_BITS; /* firmware fixes version */
+ } args;
+} upwr_init_msg;
+
+/* message sent by firmware when the core platform is powered up */
+typedef upwr_resp_msg upwr_power_up_msg;
+
+/* message sent by firmware when the core reset is released for boot */
+typedef upwr_resp_msg upwr_boot_up_msg;
+
+/* message sent by firmware when ready for service requests */
+#define UPWR_RAM_VMINOR_BITS (7)
+#define UPWR_RAM_VFIXES_BITS (6)
+#define UPWR_RAM_VMAJOR_BITS (32 - UPWR_HEADER_BITS \
+ - UPWR_RAM_VFIXES_BITS - UPWR_RAM_VMINOR_BITS)
+typedef union {
+ struct upwr_resp_hdr hdr;
+ struct {
+ uint32_t rsv : UPWR_RESP_HDR_BITS;
+ uint32_t vmajor : UPWR_RAM_VMAJOR_BITS; /* RAM fw major version */
+ uint32_t vminor : UPWR_RAM_VMINOR_BITS; /* RAM fw minor version */
+ uint32_t vfixes : UPWR_RAM_VFIXES_BITS; /* RAM fw fixes version */
+ } args;
+} upwr_ready_msg;
+
+/* message sent by firmware when shutdown finishes */
+typedef upwr_resp_msg upwr_shutdown_msg;
+
+typedef union {
+ struct upwr_resp_hdr hdr;
+ upwr_init_msg init;
+ upwr_power_up_msg pwrup;
+ upwr_boot_up_msg booted;
+ upwr_ready_msg ready;
+} upwr_startup_up_msg;
+
+/* message sent by firmware for uPower config setting */
+typedef upwr_resp_msg upwr_config_resp_msg;
+
+/* message sent by firmware for uPower alarm */
+typedef upwr_resp_msg upwr_alarm_resp_msg;
+
+/* *************************************************************************
+ * Power Management - upstream
+ ***************************************************************************/
+typedef upwr_resp_msg upwr_param_resp_msg;
+
+enum work_mode {
+ OVER_DRIVE,
+ NORMAL_DRIVE,
+ LOW_DRIVE
+};
+
+#define UTIMER3_MAX_COUNT 0xFFFFU
+
+#endif /* UPWR_DEFS_H */
diff --git a/plat/imx/imx8ulp/upower/upower_hal.c b/plat/imx/imx8ulp/upower/upower_hal.c
new file mode 100644
index 0000000000..337857b74b
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upower_hal.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2020-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#include "upower_api.h"
+#include "upower_defs.h"
+
+#define UPOWER_AP_MU1_ADDR U(0x29280000)
+
+struct MU_t *muptr = (struct MU_t *)UPOWER_AP_MU1_ADDR;
+
+void upower_apd_inst_isr(upwr_isr_callb txrx_isr,
+ upwr_isr_callb excp_isr)
+{
+ /* Do nothing */
+}
+
+int upower_status(int status)
+{
+ int ret = -1;
+
+ switch (status) {
+ case 0:
+ VERBOSE("finished successfully!\n");
+ ret = 0;
+ break;
+ case -1:
+ VERBOSE("memory allocation or resource failed!\n");
+ break;
+ case -2:
+ VERBOSE("invalid argument!\n");
+ break;
+ case -3:
+ VERBOSE("called in an invalid API state!\n");
+ break;
+ default:
+ VERBOSE("invalid return status\n");
+ break;
+ }
+
+ return ret;
+}
+
+
+void upower_wait_resp(void)
+{
+ while (muptr->RSR.B.RF0 == 0) {
+ udelay(100);
+ }
+ upwr_txrx_isr();
+}
+
+static void user_upwr_rdy_callb(uint32_t soc, uint32_t vmajor, uint32_t vminor)
+{
+ NOTICE("%s: soc=%x\n", __func__, soc);
+ NOTICE("%s: RAM version:%d.%d\n", __func__, vmajor, vminor);
+}
+
+int upower_init(void)
+{
+ int status;
+
+ status = upwr_init(APD_DOMAIN, muptr, NULL, NULL, upower_apd_inst_isr, NULL);
+ if (upower_status(status)) {
+ ERROR("%s: upower init failure\n", __func__);
+ return -EINVAL;
+ }
+
+ NOTICE("%s: start uPower RAM service\n", __func__);
+ status = upwr_start(1, user_upwr_rdy_callb);
+ upower_wait_resp();
+ /* poll status */
+ if (upower_status(status)) {
+ NOTICE("%s: upower init failure\n", __func__);
+ return status;
+ }
+
+ return 0;
+}
+
+int upower_pwm(int domain_id, bool pwr_on)
+{
+ int ret, ret_val;
+ uint32_t swt;
+
+ if (domain_id == 9U || domain_id == 11U || domain_id == 12U) {
+ swt = BIT_32(12) | BIT_32(11) | BIT_32(10) | BIT_32(9);
+ } else {
+ swt = BIT_32(domain_id);
+ }
+
+ if (pwr_on) {
+ ret = upwr_pwm_power_on(&swt, NULL, NULL);
+ } else {
+ ret = upwr_pwm_power_off(&swt, NULL, NULL);
+ }
+
+ if (ret) {
+ NOTICE("%s failed: ret: %d, pwr_on: %d\n", __func__, ret, pwr_on);
+ return ret;
+ }
+ upower_wait_resp();
+
+ ret = upwr_poll_req_status(UPWR_SG_PWRMGMT, NULL, NULL, &ret_val, 1000);
+ if (ret != UPWR_REQ_OK) {
+ NOTICE("Failure %d, %s\n", ret, __func__);
+ if (ret == UPWR_REQ_BUSY) {
+ return -EBUSY;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int upower_read_temperature(uint32_t sensor_id, int32_t *temperature)
+{
+ int ret, ret_val;
+ upwr_resp_t err_code;
+ int64_t t;
+
+ ret = upwr_tpm_get_temperature(sensor_id, NULL);
+ if (ret) {
+ return ret;
+ }
+
+ upower_wait_resp();
+ ret = upwr_poll_req_status(UPWR_SG_TEMPM, NULL, &err_code, &ret_val, 1000);
+ if (ret > UPWR_REQ_OK) {
+ return ret;
+ }
+
+ t = ret_val & 0xff;
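+	/*
+	 * Integer-only evaluation of the TSEL polynomial documented in
+	 * upower_api.h: every coefficient is pre-scaled so each term is
+	 * 100000 times its floating-point value, hence the final division
+	 * by 100000 yields the temperature in degrees Celsius.
+	 */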
+ *temperature = (2673049 * t * t * t / 10000000 + 3734262 * t * t / 100000 +
+ 4487042 * t / 100 - 4698694) / 100000;
+
+ return 0;
+}
+
+int upower_pmic_i2c_write(uint32_t reg_addr, uint32_t reg_val)
+{
+ int ret, ret_val;
+ upwr_resp_t err_code;
+
+ ret = upwr_xcp_i2c_access(0x32, 1, 1, reg_addr, reg_val, NULL);
+ if (ret) {
+		WARN("pmic i2c write failed ret %d\n", ret);
+ return ret;
+ }
+
+ upower_wait_resp();
+ ret = upwr_poll_req_status(UPWR_SG_EXCEPT, NULL, &err_code, &ret_val, 1000);
+ if (ret != UPWR_REQ_OK) {
+ WARN("i2c poll Failure %d, err_code %d, ret_val 0x%x\n",
+ ret, err_code, ret_val);
+ return ret;
+ }
+
+ VERBOSE("PMIC write reg[0x%x], val[0x%x]\n", reg_addr, reg_val);
+
+ return 0;
+}
+
+int upower_pmic_i2c_read(uint32_t reg_addr, uint32_t *reg_val)
+{
+ int ret, ret_val;
+ upwr_resp_t err_code;
+
+ if (reg_val == NULL) {
+ return -1;
+ }
+
+ ret = upwr_xcp_i2c_access(0x32, -1, 1, reg_addr, 0, NULL);
+ if (ret) {
+ WARN("pmic i2c read failed ret %d\n", ret);
+ return ret;
+ }
+
+ upower_wait_resp();
+ ret = upwr_poll_req_status(UPWR_SG_EXCEPT, NULL, &err_code, &ret_val, 1000);
+ if (ret != UPWR_REQ_OK) {
+ WARN("i2c poll Failure %d, err_code %d, ret_val 0x%x\n",
+ ret, err_code, ret_val);
+ return ret;
+ }
+
+ *reg_val = ret_val;
+
+ VERBOSE("PMIC read reg[0x%x], val[0x%x]\n", reg_addr, *reg_val);
+
+ return 0;
+}
diff --git a/plat/imx/imx8ulp/upower/upower_soc_defs.h b/plat/imx/imx8ulp/upower/upower_soc_defs.h
new file mode 100644
index 0000000000..111be1481e
--- /dev/null
+++ b/plat/imx/imx8ulp/upower/upower_soc_defs.h
@@ -0,0 +1,1154 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/**
+ * Copyright 2019-2024 NXP
+ *
+ * KEYWORDS: micro-power uPower driver API
+ * -----------------------------------------------------------------------------
+ * PURPOSE: SoC-dependent uPower driver API #defines and typedefs shared
+ * with the firmware
+ * -----------------------------------------------------------------------------
+ * PARAMETERS:
+ * PARAM NAME RANGE:DESCRIPTION: DEFAULTS: UNITS
+ * -----------------------------------------------------------------------------
+ * REUSE ISSUES: no reuse issues
+ */
+
+#ifndef UPWR_SOC_DEFS_H
+#define UPWR_SOC_DEFS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "upower_defs.h"
+
+#define UPWR_MU_MSG_SIZE (2U) /* words */
+
+#ifdef NUM_PMC_SWT_WORDS
+#define UPWR_PMC_SWT_WORDS NUM_PMC_SWT_WORDS
+#endif
+
+#ifdef NUM_PMC_RAM_WORDS
+#define UPWR_PMC_MEM_WORDS NUM_PMC_RAM_WORDS
+#endif
+
+#ifndef UPWR_DRAM_SHARED_BASE_ADDR
+#define UPWR_DRAM_SHARED_BASE_ADDR (0x28330000U)
+#endif
+
+#ifndef UPWR_DRAM_SHARED_SIZE
+#define UPWR_DRAM_SHARED_SIZE (2048U)
+#endif
+
+#define UPWR_DRAM_SHARED_ENDPLUS (UPWR_DRAM_SHARED_BASE_ADDR+\
+ UPWR_DRAM_SHARED_SIZE)
+
+#ifndef UPWR_API_BUFFER_BASE
+#define UPWR_API_BUFFER_BASE (0x28330600U)
+#endif
+
+#ifndef UPWR_API_BUFFER_ENDPLUS
+#define UPWR_API_BUFFER_ENDPLUS (UPWR_DRAM_SHARED_ENDPLUS - 64U)
+#endif
+
+#ifndef UPWR_PMC_SWT_WORDS
+#define UPWR_PMC_SWT_WORDS (1U)
+#endif
+
+#ifndef UPWR_PMC_MEM_WORDS
+#define UPWR_PMC_MEM_WORDS (2U)
+#endif
+
+#define UPWR_OSC_HI_FREQ (64U) // MHz
+#define UPWR_OSC_LO_FREQ (16U) // MHz
+
+#ifndef UPWR_I2C_FREQ
+#define UPWR_I2C_FREQ (UPWR_OSC_HI_FREQ * 1000000U)
+#endif
+
+/*
+ * i.MX8ULP-dependent uPower API Definition
+ *
+ * This chapter documents the API definitions that are specific to the
+ * i.MX8ULP SoC.
+ *
+ */
+
+/**---------------------------------------------------------------
+ * INITIALIZATION, CONFIGURATION
+ *
+ * i.MX8ULP provides only one Message Unit (MU) for each core domain:
+ * Real Time Domain (RTD) and Application Domain (APD), which has two A35 cores.
+ * Both A35 cores in APD must share the same API instance, meaning upwr_init
+ * must be called only once for each domain. The API does not provide any
+ * mutual exclusion or locking mechanism for concurrent accesses from both
+ * APD cores, so any API arbitration, if needed, must be implemented by the
+ * API user code.
+ *
+ * A domain must not go to Power Down (PD) or Deep Power Down (DPD) power modes
+ * with any service still pending (response not received).
+ *
+ * Next sections describe the i.MX8ULP particularities of service calls.
+ *
+ */
+
+/**+
+ * upwr_start()
+ *
+ * i.MX8ULP ROM firmware provides only the launch option 0, which has no
+ * power mode transition support and provides the following services:
+ * - upwr_xcp_config
+ * - upwr_xcp_sw_alarm
+ * - upwr_pwm_param
+ * - upwr_pwm_power_on
+ * - upwr_pwm_power_off
+ * - upwr_pwm_mem_retain
+ * - upwr_pwm_chng_dom_bias
+ * - upwr_pwm_chng_mem_bias
+ *
+ * i.MX8ULP RAM firmware provides 2 launch options:
+ *
+ * 1. starts all tasks, services and power mode ones;
+ * this is the full-featured firmware option.
+ * 2. starts only the power mode tasks; services are not available with
+ * this option, and futher calls to upwr_start (from either domain)
+ * have no response; this option is mostly used to accelerate power mode
+ * mixed-signal simulations, and not intended to be used with silicon.
+ *
+ * Note: option 0 is also available if the RAM firmware is loaded.
+ */
+
+/* service upwr_pwm_set_domain_pmic_rail message argument fields*/
+typedef struct {
+ uint32_t domain : 16U;
+ uint32_t rail : 16U;
+} upwr_pwm_dom_pmic_rail_args;
+
+#define UPWR_FILL_DOMBIAS_ARGS(dom, bias, args) \
+do { \
+ (args).B.domapply = (args).B.avdapply = 0U; \
+ switch ((bias)->apply) { \
+ case BIAS_APPLY_RTD_AVD: \
+ (args).B.avdapply = 1U; \
+ /* fall through */ \
+ case BIAS_APPLY_RTD: \
+ (dom) = (uint32_t)RTD_DOMAIN; \
+ (args).B.domapply = 1U; \
+ break; \
+ case BIAS_APPLY_APD_AVD: \
+ (args).B.avdapply = 1U; \
+ /* fall through */ \
+ case BIAS_APPLY_APD: \
+ (dom) = (uint32_t)APD_DOMAIN; \
+ (args).B.domapply = 1U; \
+ break; \
+ case BIAS_APPLY_AVD: \
+ (args).B.avdapply = 1U; \
+ break; \
+ default: \
+ break; \
+ } \
+ (args).B.dommode = (uint32_t)((bias)->dommode); \
+ (args).B.avdmode = (uint32_t)((bias)->avdmode); \
+ uint32_t sat = UPWR_BIAS2MILIV((1UL << UPWR_DOMBIAS_RBB_BITS) - 1UL);\
+ (args).B.domrbbn = ((bias)->dombias.rbbn > sat) ? sat : \
+ UPWR_BIAS_MILIV((bias)->dombias.rbbn); \
+ (args).B.domrbbp = ((bias)->dombias.rbbp > sat) ? sat : \
+ UPWR_BIAS_MILIV((bias)->dombias.rbbp); \
+ (args).B.avdrbbn = ((bias)->avdbias.rbbn > sat) ? sat : \
+ UPWR_BIAS_MILIV((bias)->avdbias.rbbn); \
+ (args).B.avdrbbp = ((bias)->avdbias.rbbp > sat) ? sat : \
+ UPWR_BIAS_MILIV((bias)->avdbias.rbbp); \
+} while (false)
+
+#define UPWR_FILL_MEMBIAS_ARGS(bias, args) \
+do { \
+ (args).B.en = (bias)->en; \
+} while (false)
+
+
+#define UPWR_APD_CORES (2U)
+#define UPWR_RTD_CORES (1U)
+
+#define RTD_DOMAIN (0U)
+#define APD_DOMAIN (1U)
+#define UPWR_MAIN_DOMAINS (2U)
+#define AVD_DOMAIN (2U)
+#define UPWR_DOMAIN_COUNT (3U)
+#define PSD_DOMAIN (3U)
+#define UPWR_ALL_DOMAINS (4U)
+
+typedef uint32_t soc_domain_t;
+
+/*=========================================================================
+ * UNIT CONVERSION MACROS
+ * These macros convert physical units to the values passed as arguments
+ * in API functions.
+ *=========================================================================
+ */
+
+#define UPWR_VOLT_MILIV(v) (v) /* voltage in mV to argument value */
+#define UPWR_VOLT_MICROV(v) ((v) / 1000U) /* voltage in uV to argument value */
+#define UPWR_BIAS_MILIV(v) (((v) + 49UL) / 50UL) /* bias voltage(mV) to argument value */
+#define UPWR_BIAS2MILIV(v) ((v) * 50UL) /* inverse of UPWR_BIAS_MILIV */
+#define UPWR_FREQ_KHZ(f) (f) /* frequency (kHz) to argument value */
+
+#define UPWR_DOMBIAS_MAX_MV (UPWR_BIAS2MILIV((1U << UPWR_DOMBIAS_RBB_BITS) - 1U))
+
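+/*
+ * Worked example (illustrative): a 1000 mV reverse back bias encodes as
+ * UPWR_BIAS_MILIV(1000) = (1000 + 49) / 50 = 20 and decodes back as
+ * UPWR_BIAS2MILIV(20) = 1000 mV; with UPWR_DOMBIAS_RBB_BITS = 8 the largest
+ * encodable value is UPWR_DOMBIAS_MAX_MV = 255 * 50 = 12750 mV.
+ */
+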
+/**---------------------------------------------------------------
+ * EXCEPTION SERVICE GROUP
+ */
+
+/**+
+ * upwr_xcp_config()
+ *
+ * The i.MX8ULP uPower configuration struct contains the following bitfields:
+ *
+ * - ALARM_INT (1 bit): tells which RTD MU interrupt should be used for alarms;
+ * 1= MU GPI1; 0= MU GPI0; APD alarms always use GPI0.
+ * - CFG_IOMUX (1 bit): determines if uPower configures i.MX8ULP IOMUX for
+ * I2C and mode pins used to control an external PMIC;
+ * 1= uPower firmware or PMIC driver configures i.MX8ULP IOMUX and mode pins;
+ * 0= i.MX8ULP IOMUX and mode pins not configured by uPower;
+ * - DGNBUFBITS (4 bits): determines the diagnostic buffer size according to
+ * the formula: size = 2^(DGNBUFBITS+3) bytes;
+ *
+ * Defaults are all zeroes; all other bits are reserved, and must be written 0.
+ */
+
+typedef union {
+ uint32_t R;
+ struct {
+ uint32_t ALARM_INT : 1U;
+ uint32_t CFG_IOMUX : 1U;
+ uint32_t DGNBUFBITS : 4U;
+ uint32_t RSV : 26U;
+ } B;
+} upwr_xcp_config_t;
+
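+/*
+ * Illustrative sketch, not part of the API: a configuration that routes RTD
+ * alarms to MU GPI1, lets uPower configure the IOMUX/mode pins for the PMIC,
+ * and requests a 2^(4+3) = 128-byte diagnostic buffer.
+ *
+ *	upwr_xcp_config_t cfg = { .R = 0U };
+ *
+ *	cfg.B.ALARM_INT  = 1U;
+ *	cfg.B.CFG_IOMUX  = 1U;
+ *	cfg.B.DGNBUFBITS = 4U;
+ */
+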
+/**+
+ * upwr_xcp_sw_alarm()
+ *
+ * Argument code is defined by the enum upwr_alarm_t, with the values:
+ * - UPWR_ALARM_INTERNAL: internal software error
+ * - UPWR_ALARM_EXCEPTION: uPower core exception, either illegal instruction or
+ * bus error
+ * - UPWR_ALARM_SLACK: delay path too slow, meaning a timing violation occurred
+ * or is imminent.
+ * - UPWR_ALARM_VOLTAGE: one of the measured voltages is below safety margins.
+ *
+ * Note that this service emulates an alarm that would normally be issued by
+ * uPower when it detects one of the causes above. A request to alarm the APD
+ * domain when it is powered off returns success, but is ineffective.
+ *
+ */
+
+#define UPWR_ALARM_INTERNAL (0U) /* internal error */
+#define UPWR_ALARM_EXCEPTION (1U) /* core exception */
+#define UPWR_ALARM_SLACK (2U) /* delay path too slow */
+#define UPWR_ALARM_VOLTAGE (3U) /* voltage drop */
+#define UPWR_ALARM_LAST UPWR_ALARM_VOLTAGE
+
+typedef uint32_t upwr_alarm_t;
+
+/**---------------------------------------------------------------
+ * POWER MANAGEMENT SERVICE GROUP
+ */
+
+/* values in mV: */
+#define UPWR_RTD_RBBN_MAX (1300U) /* max. RTD Reverse Back Bias N-Well */
+#define UPWR_RTD_RBBN_MIN (100U) /* min. RTD Reverse Back Bias N-Well */
+
+#define UPWR_RTD_RBBP_MAX (1300U) /* max. RTD Reverse Back Bias P-Well */
+#define UPWR_RTD_RBBP_MIN (100U) /* min. RTD Reverse Back Bias P-Well */
+
+/* APD bias can take only two values (mV): */
+#define UPWR_APD_RBBN_LO (1000U) /* low APD Reverse Back Bias N-Well */
+#define UPWR_APD_RBBN_HI (1300U) /* high APD Reverse Back Bias N-Well */
+
+#define UPWR_APD_RBBP_LO (1000U) /* low APD Reverse Back Bias P-Well */
+#define UPWR_APD_RBBP_HI (1300U) /* high APD Reverse Back Bias P-Well */
+
+/* AVD bias can take only two values (mV): */
+#define UPWR_AVD_RBBN_LO (1000U) /* low AVD Reverse Back Bias N-Well */
+#define UPWR_AVD_RBBN_HI (1300U) /* high AVD Reverse Back Bias N-Well */
+
+#define UPWR_AVD_RBBP_LO (1000U) /* low AVD Reverse Back Bias P-Well */
+#define UPWR_AVD_RBBP_HI (1300U) /* high AVD Reverse Back Bias P-Well */
+
+/**+
+ * upwr_pwm_param()
+ *
+ * Argument param is defined by the struct/union upwr_pwm_param_t with the
+ * following i.MX8ULP-specific bitfields:
+ * - DPD_ALLOW (1 bit): 1= allows uPower power mode to go Deep Power Down (DPD);
+ * uPower DPD also depends on other conditions, but if this bit is 0 uPower
+ * won't go DPD even if those conditions are met; it can go either Sleep or
+ * Deep Sleep (DSL) depending on the other configurations.
+ * - DSL_DIS (1 bit): if this bit is 1, uPower power mode won't go Deep Sleep
+ * (DSL) even if the other conditions for that are met;
+ * it may go Sleep instead.
+ * - SLP_ALLOW (1 bit): if this bit is 1, uPower power mode will go Sleep if
+ * the conditions for Partial Active are met; it may also go Deep Sleep if bit
+ * DSL_DIS=1.
+ * - DSL_BGAP_OFF (1 bit): 1= turns bandgap off when uPower goes Deep Sleep;
+ * 0= leaves bandgap on when uPower goes Deep Sleep (DSL).
+ * - DPD_BGAP_ON (1 bit): 1= leaves bandgap on when uPower goes Deep Power Down
+ * (DPD); 0= powers off bandgap when uPower goes Deep Power Down (DPD).
+ *
+ * Defaults are all zeroes; all other bits are reserved, and must be written 0.
+ */
+
+typedef union {
+ uint32_t R;
+ struct {
+ uint32_t DPD_ALLOW : 1U;
+ uint32_t DSL_DIS : 1U;
+ uint32_t SLP_ALLOW : 1U;
+ uint32_t DSL_BGAP_OFF : 1U;
+ uint32_t DPD_BGAP_ON : 1U;
+ uint32_t RSV : 27U;
+ } B;
+} upwr_pwm_param_t;
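+
+/*
+ * Illustrative usage (a minimal sketch, not part of the original sources):
+ * building an upwr_pwm_param_t value that allows Deep Power Down while keeping
+ * the bandgap on in DPD; all other bits keep their zero defaults.
+ */
+static inline upwr_pwm_param_t upwr_pwm_param_example(void)
+{
+	upwr_pwm_param_t param = { .R = 0U };
+
+	param.B.DPD_ALLOW = 1U;		/* allow uPower to go Deep Power Down */
+	param.B.DPD_BGAP_ON = 1U;	/* leave the bandgap on in DPD */
+
+	return param;
+}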
+
+/**+
+ * upwr_pwm_chng_reg_voltage()
+ *
+ * Argument reg is defined by the enum upwr_pmc_reg_t, with regulator ids:
+ * - RTD_PMC_REG: RTD regulator
+ * - APD_PMC_REG: APD regulator
+ * - RTD_BIAS_PMC_REG: RTD bias regulator
+ * - APD_BIAS_PMC_REG: APD bias regulator
+ * - RTD_LVD_PMC_MON: RTD LVD regulator
+ * - APD_LVD_PMC_MON: APD LVD regulator
+ * - AVD_LVD_PMC_MON: AVD LVD regulator
+ *
+ * Argument volt is defined by the formula:
+ *
+ * argument = 92.30797633*V - 55.000138, rounded to the nearest integer,
+ * where V is the value in Volts, with a minimum of 0.595833 V (argument = 0).
+ *
+ */
+
+/* Regulator ids */
+typedef enum {
+ RTD_PMC_REG,
+ APD_PMC_REG,
+ RTD_BIAS_PMC_REG,
+ APD_BIAS_PMC_REG,
+ RTD_LVD_PMC_MON,
+ APD_LVD_PMC_MON,
+ AVD_LVD_PMC_MON
+} upwr_pmc_reg_t;
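+
+/*
+ * Illustrative helper (hypothetical, not part of the uPower API): converts a
+ * voltage in millivolts to the volt argument of upwr_pwm_chng_reg_voltage(),
+ * following the formula above (argument = 92.30797633*V - 55.000138, V in
+ * Volts, rounded to the nearest integer); e.g. 1000 mV maps to argument 37.
+ * Integer arithmetic is used to avoid floating point in firmware code.
+ */
+static inline uint32_t upwr_volt_arg_from_mv(uint32_t mv)
+{
+	/* scale the formula by 10^6: argument ~= (92308*mv - 55000138)/10^6 */
+	int64_t scaled = ((92308LL * (int64_t)mv) - 55000138LL + 500000LL) / 1000000LL;
+
+	return (scaled < 0) ? 0U : (uint32_t)scaled;
+}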
+
+/**+
+ * upwr_pwm_freq_setup()
+ *
+ * Argument domain is either RTD_DOMAIN or APD_DOMAIN.
+ * Arguments nextfq and currfq are to be defined (TBD).
+ */
+
+/**+
+ * upwr_pwm_dom_power_on()
+ *
+ * The arguments must comply with the restrictions below, otherwise the service
+ * is not executed and returns error UPWR_RESP_BAD_REQ:
+ * - argument domain can only be APD_DOMAIN, because in i.MX8ULP it is not
+ *   possible to have APD powered on (and calling the service) while RTD is
+ *   completely powered off.
+ * - the call can only be made from the RTD domain, for the same reason.
+ * - argument boot can only be 1, because in i.MX8ULP it is not possible to
+ * power on the APD domain without starting the core boot.
+ *
+ * If APD is already powered on and booting/booted when the service is called,
+ * it returns success without doing anything.
+ */
+
+/**+
+ * upwr_pwm_boot_start()
+ *
+ * The arguments must comply with the restrictions below, otherwise the service
+ * is not executed and returns error UPWR_RESP_BAD_REQ:
+ * - argument domain can only be APD_DOMAIN, because in i.MX8ULP it is not
+ *   possible to have APD powered on (and calling the service) while RTD is
+ *   completely powered off.
+ * - the call can only be made from the RTD domain, for the same reason.
+ *
+ * If APD is already booted when the service is called, it returns success
+ * without doing anything. Otherwise, it returns the error UPWR_RESP_BAD_STATE,
+ * because in i.MX8ULP APD cannot be booted separately from power on.
+ */
+
+/**+
+ * upwr_pwm_power_on(),
+ * upwr_pwm_power_off(),
+ * upwr_pwm_mem_retain()
+ *
+ * These three service functions use the same arguments:
+ *
+ * argument swt is an array of one 32-bit word: uint32_t swt[1];
+ * naturally the pointer to a single uint32_t variable may be passed.
+ * Each bit of the word corresponds to a switch, according to the i.MX8ULP
+ * Reference Manual Rev B draft 2 table 64 Power switch reset state,
+ * and the following formula:
+ *
+ * if switch number < 10, bit number = switch number;
+ * if switch number > 9,  bit number = switch number + 3;
+ *
+ * bits 9, 10, 11 and 12 must have the same value (corresponding to switch 9)
+ *
+ * Note: this argument is not used in upwr_pwm_mem_retain.
+ *
+ * argument mem is an array of two 32-bit words: uint32_t mem[2];
+ * naturally the pointer to a single uint64_t variable may be passed, since
+ * both ARM and RISC-V are little endian architectures.
+ * Each bit of the words corresponds to a memory, according to the i.MX8ULP
+ * Reference Manual table "Memory Partitions".
+ *
+ * Turning a memory completely on (array and peripheral) will automatically
+ * turn on its power switch, even if not explicitly commanded.
+ * Turning a memory's power switch off will automatically turn off its array
+ * and peripheral beforehand, even if not explicitly commanded.
+ *
+ * Argument restrictions:
+ *
+ * The swt and mem arguments must comply with the restrictions below, otherwise
+ * the service is not executed (no switch/memory is changed) and returns error
+ * UPWR_RESP_BAD_REQ:
+ * 1. one must not put a memory in retention coming from an off state.
+ * 2. switches 9, 10, 11 and 12 must be turned on/off simultaneously.
+ * 3. an AVD switch can only be turned off if all AVD switches belong to the
+ * domain requesting the service (as defined by registers SYSCTRL0,
+ * LPAV_MASTER_ALLOC_CTRL and LPAV_SLAVE_ALLOC_CTRL);
+ * there is no such restriction to turn the switch on.
+ * 4. an AVD memory can only be turned off or put in retention if all
+ * AVD memories belong to the domain requesting the service
+ * (as defined by registers SYSCTRL0, LPAV_MASTER_ALLOC_CTRL and
+ * LPAV_SLAVE_ALLOC_CTRL); there is no such restriction to turn on the
+ * memories.
+ * 5. EdgeLock RAMs must not be turned off, unless RTD domain is in
+ * Deep Power Down (DPD).
+ * 6. Power Switch 19 must be on to turn on switches 17 (MIPI/DSI),
+ * 18 (MIPI/CSI), and all AVD power switches.
+ *
+ * Service Errors:
+ *
+ * Besides the error UPWR_RESP_BAD_REQ caused by violations of the restrictions
+ * above, the services may fail with error UPWR_RESP_RESOURCE if a power mode
+ * transition or a similar service is executing at the same time.
+ * This error should be interpreted as a "try later" response, as the service
+ * will succeed once those concurrent executions are done, and no other is
+ * started.
+ */
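+
+/*
+ * Illustrative helper (hypothetical, not part of the uPower API): maps a power
+ * switch number to its bit position in the swt word, per the formula above
+ * (switches 0..9 map 1:1, switches above 9 are shifted by 3; bits 9..12 all
+ * correspond to switch 9).
+ */
+static inline uint32_t upwr_swt_bit(uint32_t swt_num)
+{
+	return (swt_num < 10U) ? swt_num : (swt_num + 3U);
+}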
+
+/**+
+ * upwr_pwm_chng_switch_mem()
+ *
+ * The bit numbers in the argument struct mask and on/off state fields
+ * are the same as for services upwr_pwm_power_on, upwr_pwm_power_off and
+ * upwr_pwm_mem_retain.
+ *
+ * Turning a memory completely on (array and peripheral) will automatically
+ * turn on its power switch, even if not explicitly commanded.
+ *
+ * Argument restrictions:
+ *
+ * Same argument restrictions as services upwr_pwm_power_on, upwr_pwm_power_off
+ * and upwr_pwm_mem_retain, plus the following:
+ *
+ * 1. one must not turn a memory peripheral on and a memory array off.
+ * 2. one must not put a memory in retention and switch its power switch off.
+ *
+ * Service Errors:
+ *
+ * Besides the error UPWR_RESP_BAD_REQ caused by violations of the restrictions
+ * above, the service may fail with error UPWR_RESP_RESOURCE if a power mode
+ * transition or a similar service is executing at the same time.
+ * This error should be interpreted as a "try later" response, as the service
+ * will succeed once those concurrent executions are done, and no other is
+ * started.
+ */
+
+/**+
+ * upwr_pwm_pmode_config()
+ *
+ * The same power switch and memory restrictions of service
+ * upwr_pwm_chng_switch_mem apply between power modes; however, they are not
+ * enforced by this service, that is, it does not return a service error.
+ *
+ * The default power mode configurations for RTD and APD are documented in the
+ * i.MX8ULP Reference Manual sections "Power mode details (real-time domain)"
+ * and "Power mode details (application domain)", respectively.
+ * If those configurations are satisfactory, this service does not have
+ * to be called.
+ *
+ * Power Mode Configuration Structure:
+ *
+ * A description of the power mode configuration structure elements follows.
+ * - dom_swts: the same switch configuration structures used in service
+ * upwr_pwm_chng_switch_mem argument swt.
+ * - mem_swts: the same memory configuration structures used in service
+ * upwr_pwm_chng_switch_mem argument mem.
+ * - regs: an array of structs base_reg_cfg_t (see upower_soc_defs.h),
+ * one element for each regulator; base_reg_cfg_t has fields
+ * mode (regulator-dependent), lvl (voltage level in uV),
+ * comp (regulator-dependent complementary info).
+ * - pads: pad configuration in low power; see pad_cfg_t definition below.
+ * - mons: domain monitors (LVD and HVD) configuration;
+ * see mon_cfg_t definition below.
+ * - avd_mons: same as mons for the AVD domain; see mon_cfg_t definition below.
+ * - dom_bbias: back-bias configuration for the domain;
+ * see base_bbias_cfg_t definition below.
+ * - avd_bbias: back-bias configuration for the AVD domain;
+ * see base_bbias_cfg_t definition below.
+ * - mem_bbias: back-bias configuration for the memory;
+ * see base_bbias_cfg_t definition below.
+ * - mem_fbias: forward-bias configuration for the memory;
+ * see base_fbias_cfg_t definition below.
+ * - pmic: PMIC-specific configuration
+ *
+ * Structure pad_cfg_t:
+ *
+ * Pad control for low power modes (power off, etc), 1 bit per pad segment.
+ * - rst : put pad segment in reset.
+ * - iso : put pad segment in isolation.
+ * - compl: specific pad segment information.
+ * - msk : select which pads will be updated.
+ *
+ * Structure mon_cfg_t:
+ *
+ * Configures a voltage monitor and its actions.
+ * There are monitors for RTD, APD and AVD, monitoring LVD and HVD.
+ * - lvl : Voltage level (in uV).
+ * - mode : Mode of monitor (ON, OFF, LP, etc).
+ * - compl: Extra info for the monitor.
+ *
+ * Structure base_bbias_cfg_t:
+ *
+ * Configures back-bias (for domain or memory).
+ * - mode : Back bias mode (OFF, RBB, ARBB, etc).
+ * - p_lvl: Voltage level of p-well (in mV).
+ * - n_lvl: Voltage level of n-well (in mV).
+ * - compl: Complementary bias-specific (enable reset, interrupt, clamp, etc).
+ *
+ * Structure base_fbias_cfg_t:
+ *
+ * Configure memory forward bias for a memory segment.
+ *
+ * - mode : Forward bias mode (OFF, ON).
+ * - msk : Selects which memory will be updated
+ *
+ */
+
+/*=========================================================================
+ * Domain bias
+ *=========================================================================
+ */
+
+/**+
+ * upwr_pwm_chng_dom_bias()
+ *
+ * Argument bias is a pointer to a struct with fields:
+ * - apply: tells to which domains the bias must be applied;
+ * options are RTD only (BIAS_APPLY_RTD), RTD and AVD (BIAS_APPLY_RTD_AVD),
+ * APD only (BIAS_APPLY_APD), APD and AVD (BIAS_APPLY_APD_AVD),
+ * AVD only (BIAS_APPLY_AVD)
+ * - dommode: bias mode of the main domain (RTD or APD, determined by apply);
+ * options are disabled (NBB_BIAS_MODE), reverse back bias (RBB_BIAS_MODE),
+ * asymmetrical forward bias (AFBB_BIAS_MODE), asymmetrical reverse bias
+ * (ARBB_BIAS_MODE).
+ * - avdmode: bias mode of Audio-Video Domain (AVD);
+ * options are the same as dommode.
+ * - dombias: bias voltage level(s) for the main domain (RTD or APD,
+ * determined by apply); it is a structure with 2 fields, rbbn and rbbp,
+ * for the N-well and P-well voltages, respectively; values are in mV.
+ * - avdbias: bias voltage level(s) for the Audio-Video Domain (AVD);
+ * same fields as dombias;
+ *
+ * Argument restrictions:
+ *
+ * Voltage levels must comply with the #define-determined limits/options:
+ * between UPWR_RTD_RBBN_MIN and UPWR_RTD_RBBN_MAX (inclusive) for RTD N-well;
+ * between UPWR_RTD_RBBP_MIN and UPWR_RTD_RBBP_MAX (inclusive) for RTD P-well;
+ * either UPWR_APD_RBBN_LO or UPWR_APD_RBBN_HI for APD N-well;
+ * either UPWR_APD_RBBP_LO or UPWR_APD_RBBP_HI for APD P-well;
+ * either UPWR_AVD_RBBN_LO or UPWR_AVD_RBBN_HI for AVD N-well;
+ * either UPWR_AVD_RBBP_LO or UPWR_AVD_RBBP_HI for AVD P-well;
+ *
+ * But note that the limits/options above do not apply to all bias modes:
+ * rbbn is used and checked only in mode RBB_BIAS_MODE;
+ * rbbp is used and checked only in modes RBB_BIAS_MODE and ARBB_BIAS_MODE;
+ * modes AFBB_BIAS_MODE and NBB_BIAS_MODE use or check neither rbbn nor rbbp;
+ *
+ * Service error UPWR_RESP_BAD_REQ is returned if the voltage limits/options
+ * above are violated.
+ */
+
+/* argument struct for service upwr_pwm_chng_dom_bias:
+ */
+
+typedef enum { /* bias modes (both domain and memory): */
+ NBB_BIAS_MODE = 0, /* bias disabled */
+ RBB_BIAS_MODE = 1, /* reverse back bias enabled */
+ AFBB_BIAS_MODE = 2, /* asymmetrical forward bias */
+ ARBB_BIAS_MODE = 3 /* asymmetrical reverse bias */
+} upwr_bias_mode_t;
+
+/* Domain Bias config (one per domain) */
+
+typedef enum {
+ BIAS_APPLY_RTD, /* apply to RTD only */
+ BIAS_APPLY_RTD_AVD, /* apply to RTD and AVD */
+ BIAS_APPLY_APD, /* apply to APD only */
+ BIAS_APPLY_APD_AVD, /* apply to APD and AVD */
+ BIAS_APPLY_AVD, /* apply to AVD only */
+ BIAS_APPLY_COUNT /* number of apply options */
+} upwr_bias_apply_t;
+
+typedef struct {
+ uint16_t rbbn; /* reverse back bias N well (mV) */
+ uint16_t rbbp; /* reverse back bias P well (mV) */
+} upwr_rbb_t;
+
+struct upwr_dom_bias_cfg_t {
+ upwr_bias_apply_t apply; /* bias application option */
+ upwr_bias_mode_t dommode; /* RTD/APD bias mode config */
+ upwr_bias_mode_t avdmode; /* AVD bias mode config */
+ upwr_rbb_t dombias; /* RTD/APD reverse back bias */
+ upwr_rbb_t avdbias; /* AVD reverse back bias */
+};
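+
+/*
+ * Illustrative usage (hypothetical values, not from the original sources):
+ * filling a bias request for upwr_pwm_chng_dom_bias() that enables reverse
+ * back bias on RTD and AVD, keeping the voltages within the limits above.
+ */
+static inline void upwr_dom_bias_cfg_example(struct upwr_dom_bias_cfg_t *bias)
+{
+	bias->apply = BIAS_APPLY_RTD_AVD;	/* apply to RTD and AVD */
+	bias->dommode = RBB_BIAS_MODE;		/* RTD reverse back bias */
+	bias->avdmode = RBB_BIAS_MODE;		/* AVD reverse back bias */
+	bias->dombias.rbbn = UPWR_RTD_RBBN_MIN;	/* 100 mV, within RTD limits */
+	bias->dombias.rbbp = UPWR_RTD_RBBP_MIN;	/* 100 mV, within RTD limits */
+	bias->avdbias.rbbn = UPWR_AVD_RBBN_LO;	/* AVD N-well: only LO or HI */
+	bias->avdbias.rbbp = UPWR_AVD_RBBP_LO;	/* AVD P-well: only LO or HI */
+}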
+
+/* bias struct used in power mode config definitions */
+
+/**
+ * When writing a power mode transition program, please read the comments below
+ * carefully. The structure and logic are complex; there is a lot of extension
+ * and reuse.
+ *
+ * First, for mode, extend "uint32_t mode" to a union struct to add support
+ * for AVD:
+ * typedef union {
+ *	uint32_t R;
+ *	struct {
+ *		uint32_t mode : 8;
+ *		uint32_t rsrv_1 : 8;
+ *		uint32_t avd_mode : 8;
+ *		uint32_t rsrv_2 : 8;
+ *	} B;
+ * } dom_bias_mode_cfg_t;
+ *
+ * Second, if mode is AFBB mode, there is no need to configure rbbn and rbbp;
+ * uPower firmware will configure all SRAM_AFBB_0 or SRAM_AFBB_1 for the
+ * corresponding domain.
+ *
+ * Third, if mode is RBB mode, extend "uint32_t rbbn" and "uint32_t rbbp" to a
+ * union struct to add support for AVD:
+ * typedef union {
+ *	uint32_t R;
+ *	struct {
+ *		uint32_t lvl : 8;
+ *		uint32_t rsrv_1 : 8;
+ *		uint32_t avd_lvl : 8;
+ *		uint32_t rsrv_2 : 8;
+ *	} B;
+ * } dom_bias_lvl_cfg_t;
+ */
+typedef struct {
+ uint32_t mode; /* Domain bias mode config, extend to dom_bias_mode_cfg_t to support RTD, APD, AVD */
+ uint32_t rbbn; /* reverse back bias N well */
+ uint32_t rbbp; /* reverse back bias P well */
+} UPWR_DOM_BIAS_CFG_T;
+
+/*=========================================================================
+ * Memory bias
+ *=========================================================================
+ */
+/**+
+ * upwr_pwm_chng_mem_bias()
+ *
+ * Argument struct contains only the field en, which can be either 1 (bias
+ * enabled) or 0 (bias disabled).
+ *
+ * Argument domain must be either RTD_DOMAIN (Real Time Domain) or APD_DOMAIN
+ * (Application Domain).
+ */
+
+/* Memory Bias config */
+struct upwr_mem_bias_cfg_t {
+ uint32_t en; /* Memory bias enable config */
+};
+
+/* bias struct used in power mode config definitions */
+typedef struct {
+ uint32_t en; /* Memory bias enable config */
+} UPWR_MEM_BIAS_CFG_T;
+
+/* Split different Bias */
+struct upwr_pmc_bias_cfg_t {
+ UPWR_DOM_BIAS_CFG_T dombias_cfg; /* Domain Bias config */
+ UPWR_MEM_BIAS_CFG_T membias_cfg; /* Memory Bias config */
+};
+
+/*=========================================================================
+ * Power modes
+ *=========================================================================
+ */
+
+/* from msb->lsb: Azure bit, dual boot bit, low power boot bit */
+typedef enum {
+ SOC_BOOT_SINGLE = 0,
+ SOC_BOOT_LOW_PWR = 1,
+ SOC_BOOT_DUAL = 2,
+ SOC_BOOT_AZURE = 4
+} SOC_BOOT_TYPE_T;
+
+#ifdef UPWR_COMP_RAM
+/* Power modes for RTD domain */
+typedef enum {
+ DPD_RTD_PWR_MODE, /* Real Time Deep Power Down mode */
+ PD_RTD_PWR_MODE, /* Real Time Power Down mode */
+ DSL_RTD_PWR_MODE, /* Real Time Domain Deep Sleep Mode */
+ HLD_RTD_PWR_MODE, /* Real Time Domain Hold Mode */
+ SLP_RTD_PWR_MODE, /* Sleep Mode */
+ ADMA_RTD_PWR_MODE,/* Active DMA Mode */
+ ACT_RTD_PWR_MODE, /* Active Domain Mode */
+ NUM_RTD_PWR_MODES
+} upwr_ps_rtd_pwr_mode_t;
+
+/* Abstract power modes */
+typedef enum {
+ DPD_PWR_MODE,
+ PD_PWR_MODE,
+ PACT_PWR_MODE,
+ DSL_PWR_MODE,
+ HLD_PWR_MODE,
+ SLP_PWR_MODE,
+ ADMA_PWR_MODE,
+ ACT_PWR_MODE,
+ NUM_PWR_MODES,
+ NUM_APD_PWR_MODES = NUM_PWR_MODES,
+ TRANS_PWR_MODE = NUM_PWR_MODES,
+ INVALID_PWR_MODE = TRANS_PWR_MODE + 1
+} abs_pwr_mode_t;
+#else /* UPWR_COMP_RAM */
+/* Power modes for RTD domain */
+#define DPD_RTD_PWR_MODE (0U) /* Real Time Deep Power Down mode */
+#define PD_RTD_PWR_MODE (1U) /* Real Time Power Down mode */
+#define DSL_RTD_PWR_MODE (2U) /* Real Time Domain Deep Sleep Mode */
+#define HLD_RTD_PWR_MODE (3U) /* Real Time Domain Hold Mode */
+#define SLP_RTD_PWR_MODE (4U) /* Sleep Mode */
+#define ADMA_RTD_PWR_MODE (5U) /* Active DMA Mode */
+#define ACT_RTD_PWR_MODE (6U) /* Active Domain Mode */
+#define NUM_RTD_PWR_MODES (7U)
+
+typedef uint32_t upwr_ps_rtd_pwr_mode_t;
+
+/* Abstract power modes */
+#define DPD_PWR_MODE (0U)
+#define PD_PWR_MODE (1U)
+#define PACT_PWR_MODE (2U)
+#define DSL_PWR_MODE (3U)
+#define HLD_PWR_MODE (4U)
+#define SLP_PWR_MODE (5U)
+#define ADMA_PWR_MODE (6U)
+#define ACT_PWR_MODE (7U)
+#define NUM_PWR_MODES (8U)
+#define NUM_APD_PWR_MODES NUM_PWR_MODES
+#define TRANS_PWR_MODE NUM_PWR_MODES
+#define INVALID_PWR_MODE (TRANS_PWR_MODE + 1U)
+
+typedef uint32_t abs_pwr_mode_t;
+#endif /* UPWR_COMP_RAM */
+
+typedef struct {
+ abs_pwr_mode_t mode;
+ bool ok;
+} pch_trans_t;
+
+typedef pch_trans_t rtd_trans_t;
+
+typedef struct {
+ abs_pwr_mode_t mode;
+ pch_trans_t core[UPWR_APD_CORES];
+} apd_trans_t;
+
+/* Codes for APD pwr mode as programmed in LPMODE reg */
+typedef enum {
+ ACT_APD_LPM,
+ SLP_APD_LPM = 1,
+ DSL_APD_LPM = 3,
+ PACT_APD_LPM = 7,
+ PD_APD_LPM = 15,
+ DPD_APD_LPM = 31,
+ HLD_APD_LPM = 63
+} upwr_apd_lpm_t;
+
+/* PowerSys low power config */
+struct upwr_powersys_cfg_t {
+ uint32_t lpm_mode; /* Powersys low power mode */
+};
+
+/*=*************************************************************************
+ * RTD
+ *=*************************************************************************/
+/* Config pmc PADs */
+struct upwr_pmc_pad_cfg_t {
+ uint32_t pad_close; /* PMC PAD close config */
+ uint32_t pad_reset; /* PMC PAD reset config */
+ uint32_t pad_tqsleep; /* PMC PAD TQ Sleep config */
+};
+
+/* Config regulator (internal and external) */
+struct upwr_reg_cfg_t {
+ uint32_t volt; /* Regulator voltage config */
+ uint32_t mode; /* Regulator mode config */
+};
+
+/* Config pmc monitors */
+struct upwr_pmc_mon_cfg_t {
+ uint32_t mon_hvd_en; /* PMC mon HVD */
+ uint32_t mon_lvd_en; /* PMC mon LVD */
+ uint32_t mon_lvdlvl; /* PMC mon LVDLVL */
+};
+
+/* Same monitor config for RTD (for compatibility) */
+#define upwr_pmc_mon_rtd_cfg_t upwr_pmc_mon_cfg_t
+
+typedef swt_config_t ps_rtd_swt_cfgs_t[NUM_RTD_PWR_MODES];
+typedef swt_config_t ps_apd_swt_cfgs_t[NUM_APD_PWR_MODES];
+
+/*=*************************************************************************
+ * APD
+ *=*************************************************************************/
+
+/* PowerSys PMIC config */
+struct upwr_pmic_cfg_t {
+ uint32_t volt;
+ uint32_t mode;
+ uint32_t mode_msk;
+};
+
+typedef uint32_t offs_t;
+
+struct ps_apd_pwr_mode_cfg_t {
+ #ifdef UPWR_SIMULATOR_ONLY
+ struct upwr_switch_board_t *swt_board_offs;
+ struct upwr_mem_switches_t *swt_mem_offs;
+ #else
+ offs_t swt_board_offs;
+ offs_t swt_mem_offs;
+ #endif
+ struct upwr_pmic_cfg_t pmic_cfg;
+ struct upwr_pmc_pad_cfg_t pad_cfg;
+ struct upwr_pmc_bias_cfg_t bias_cfg;
+};
+
+/* Get the pointer to swt config */
+static inline struct upwr_switch_board_t*
+get_apd_swt_cfg(volatile struct ps_apd_pwr_mode_cfg_t *cfg)
+{
+ char *ptr;
+
+ ptr = (char *)cfg;
+ ptr += (uint64_t)cfg->swt_board_offs;
+ return (struct upwr_switch_board_t *)ptr;
+}
+
+/* Get the pointer to mem config */
+static inline struct upwr_mem_switches_t*
+get_apd_mem_cfg(volatile struct ps_apd_pwr_mode_cfg_t *cfg)
+{
+ char *ptr;
+
+ ptr = (char *)cfg;
+ ptr += (uint64_t)cfg->swt_mem_offs;
+ return (struct upwr_mem_switches_t *)ptr;
+}
+
+/* Power Mode configuration */
+
+#define ps_rtd_pwr_mode_cfg_t upwr_power_mode_cfg_t
+
+/* these typedefs are just for RISC-V sizeof purposes */
+typedef uint32_t swt_board_ptr_t;
+typedef uint32_t swt_mem_ptr_t;
+
+struct upwr_power_mode_cfg_t {
+ #ifdef UPWR_SIMULATOR_ONLY
+ struct upwr_switch_board_t *swt_board; /* Swt board for mem. */
+ struct upwr_mem_switches_t *swt_mem; /* Swt to mem. arrays, perif */
+ #else
+ #ifdef __LP64__
+ uint32_t swt_board;
+ uint32_t swt_mem;
+ #else
+ struct upwr_switch_board_t *swt_board; /* Swt board for mem. */
+ struct upwr_mem_switches_t *swt_mem; /* Swt to mem. arrays, perif */
+ #endif
+ #endif
+ struct upwr_reg_cfg_t in_reg_cfg; /* internal regulator config*/
+ struct upwr_reg_cfg_t pmic_cfg; /* external regulator - pmic*/
+ struct upwr_pmc_pad_cfg_t pad_cfg; /* Pad conf for power trans*/
+ struct upwr_pmc_mon_rtd_cfg_t mon_cfg; /*monitor configuration */
+ struct upwr_pmc_bias_cfg_t bias_cfg; /* Memory/Domain Bias conf */
+ struct upwr_powersys_cfg_t pwrsys_lpm_cfg; /* pwrsys low power config*/
+};
+
+static inline unsigned int upwr_sizeof_pmode_cfg(uint32_t domain)
+{
+ switch (domain) {
+ case RTD_DOMAIN:
+ return sizeof(struct upwr_power_mode_cfg_t) +
+ (sizeof(struct upwr_switch_board_t)*
+ UPWR_PMC_SWT_WORDS) +
+ (sizeof(struct upwr_mem_switches_t)*
+ UPWR_PMC_MEM_WORDS) -
+ 2U * (sizeof(void *) - sizeof(swt_board_ptr_t));
+
+ /* fall through */
+ case APD_DOMAIN:
+ return sizeof(struct ps_apd_pwr_mode_cfg_t) +
+ (sizeof(struct upwr_switch_board_t)*
+ UPWR_PMC_SWT_WORDS) +
+ (sizeof(struct upwr_mem_switches_t)*
+ UPWR_PMC_MEM_WORDS);
+
+ /* fall through */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*=*************************************************************************
+ * All configs
+ *=*************************************************************************/
+
+/* LVD/HVD monitor config for a single domain */
+
+/* Domain + AVD monitor config
+ * For RTD, mapped in mon_cfg.mon_hvd_en
+ * For APD, mapped temporarily in pad_cfg.pad_tqsleep
+ */
+typedef union upwr_mon_cfg_union_t {
+ volatile uint32_t R;
+ struct {
+		/* Original config, not changed */
+ volatile uint32_t rsrv_1 : 8;
+ /* DOM */
+ volatile uint32_t dom_lvd_irq_ena : 1;
+ volatile uint32_t dom_lvd_rst_ena : 1;
+ volatile uint32_t dom_hvd_irq_ena : 1;
+ volatile uint32_t dom_hvd_rst_ena : 1;
+ volatile uint32_t dom_lvd_lvl : 4;
+ volatile uint32_t dom_lvd_ena : 1;
+ volatile uint32_t dom_hvd_ena : 1;
+ /* AVD */
+ volatile uint32_t avd_lvd_irq_ena : 1;
+ volatile uint32_t avd_lvd_rst_ena : 1;
+ volatile uint32_t avd_hvd_irq_ena : 1;
+ volatile uint32_t avd_hvd_rst_ena : 1;
+ volatile uint32_t avd_lvd_lvl : 4;
+ volatile uint32_t avd_lvd_ena : 1;
+ volatile uint32_t avd_hvd_ena : 1;
+ } B;
+} upwr_mon_cfg_t;
+
+/* Get the monitor config word from RAM (domain and AVD) */
+static inline uint32_t get_mon_cfg(uint8_t dom, void *mode_cfg)
+{
+ if (dom == RTD_DOMAIN) {
+ return ((struct ps_rtd_pwr_mode_cfg_t *)mode_cfg)->mon_cfg.mon_hvd_en;
+ } else {
+ return ((struct ps_apd_pwr_mode_cfg_t *)mode_cfg)->pad_cfg.pad_tqsleep;
+ }
+}
+
+/* Set the monitor config word in RAM (domain and AVD) */
+static inline void set_mon_cfg(uint8_t dom, void *mode_cfg,
+ upwr_mon_cfg_t mon_cfg)
+{
+ uint32_t *cfg;
+
+ if (dom == RTD_DOMAIN) {
+ cfg = (uint32_t *)&((struct ps_rtd_pwr_mode_cfg_t *)mode_cfg)->mon_cfg.mon_hvd_en;
+ } else {
+ cfg = (uint32_t *)&((struct ps_apd_pwr_mode_cfg_t *)mode_cfg)->pad_cfg.pad_tqsleep;
+ }
+
+ *cfg = mon_cfg.R;
+}
+
+#define PMIC_REG_VALID_TAG 0xAAU
+
+/**
+ * Limit the maximum PMIC register->value count to 8;
+ * each entry costs 4 bytes, 32 bytes in total.
+ */
+#define MAX_PMIC_REG_COUNT 0x8U
+
+/**
+ * Configuration structure for a PMIC register setting.
+ *
+ * @ tag: tag used to check whether the entry is valid; the valid tag is
+ *        PMIC_REG_VALID_TAG
+ * @ power_mode: power mode of the corresponding domain
+ *               (RTD: see upwr_ps_rtd_pwr_mode_t; APD: see abs_pwr_mode_t)
+ * @ i2c_addr: I2C register address
+ * @ i2c_data: I2C data value
+ */
+struct ps_pmic_reg_data_cfg_t {
+ uint32_t tag : 8;
+ uint32_t power_mode : 8;
+ uint32_t i2c_addr : 8;
+ uint32_t i2c_data : 8;
+};
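+
+/*
+ * Illustrative initialization (hypothetical register address and value, not
+ * from the original sources): one PMIC register entry, tagged valid, applied
+ * when the domain enters the active power mode.
+ */
+static inline void ps_pmic_reg_entry_example(struct ps_pmic_reg_data_cfg_t *entry)
+{
+	entry->tag = PMIC_REG_VALID_TAG;	/* mark the entry as valid */
+	entry->power_mode = ACT_PWR_MODE;	/* APD mode, see abs_pwr_mode_t */
+	entry->i2c_addr = 0x30U;		/* hypothetical PMIC register address */
+	entry->i2c_data = 0x1cU;		/* hypothetical register value */
+}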
+
+/* Unify access to the PMIC cfg for RTD and APD */
+
+typedef union {
+ struct upwr_reg_cfg_t RTD;
+ struct upwr_pmic_cfg_t APD;
+} pmic_cfg_t;
+
+/* Access to PMIC mode mask and AVD mode */
+
+typedef union {
+ uint32_t R;
+ struct {
+ uint8_t mode; /* Domain PMIC mode */
+ uint8_t msk; /* Domain PMIC mode mask */
+ uint8_t avd_mode; /* AVD PMIC mode */
+ uint8_t avd_msk; /* AVD PMIC mode mask */
+ } B;
+} pmic_mode_cfg_t;
+
+/* Access RTD, APD and AVD modes and masks */
+static inline pmic_mode_cfg_t *get_pmic_mode_cfg(uint8_t dom, pmic_cfg_t *cfg)
+{
+ uint32_t *mode_cfg;
+
+ if (dom == RTD_DOMAIN) {
+ mode_cfg = &cfg->RTD.mode;
+ } else {
+ mode_cfg = &cfg->APD.mode;
+ }
+
+ return (pmic_mode_cfg_t *)mode_cfg;
+}
+
+static inline uint8_t get_pmic_mode(uint8_t dom, pmic_cfg_t *cfg)
+{
+ return get_pmic_mode_cfg(dom, cfg)->B.mode;
+}
+
+static inline void set_pmic_mode(uint8_t dom, pmic_cfg_t *cfg, uint8_t mode)
+{
+ get_pmic_mode_cfg(dom, cfg)->B.mode = mode;
+}
+
+static inline uint32_t get_pmic_mode_msk(uint8_t dom, pmic_cfg_t *cfg)
+{
+ pmic_mode_cfg_t *mode_cfg;
+
+ if (dom == RTD_DOMAIN) {
+ mode_cfg = (pmic_mode_cfg_t *)&cfg->RTD.mode;
+ return mode_cfg->B.msk;
+ } else {
+ return cfg->APD.mode_msk;
+ }
+}
+
+/* Getters and setters for AVD mode and mask */
+static inline uint8_t get_avd_pmic_mode(uint8_t dom, pmic_cfg_t *cfg)
+{
+ return get_pmic_mode_cfg(dom, cfg)->B.avd_mode;
+}
+
+static inline void set_avd_pmic_mode(uint8_t dom, pmic_cfg_t *cfg, uint8_t mode)
+{
+ get_pmic_mode_cfg(dom, cfg)->B.avd_mode = mode;
+}
+
+static inline uint8_t get_avd_pmic_mode_msk(uint8_t dom, pmic_cfg_t *cfg)
+{
+ return get_pmic_mode_cfg(dom, cfg)->B.avd_msk;
+}
+
+static inline void set_avd_pmic_mode_msk(uint8_t dom,
+ pmic_cfg_t *cfg,
+ uint8_t msk)
+{
+ get_pmic_mode_cfg(dom, cfg)->B.avd_msk = msk;
+}
+
+struct ps_delay_cfg_t {
+ uint32_t tag : 8U;
+ uint32_t rsv : 8U;
+	uint32_t exitdelay : 16U;	/* exit delay in us */
+};
+
+#define PS_DELAY_TAG 0xA5U
+
+/* max exit delay = 0xffff = 65535 us = 65.5 ms, which is sufficient; */
+/* an 8-bit field (max 255 us) would not be enough */
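+
+/*
+ * Illustrative initialization (hypothetical delay value, not from the original
+ * sources): one per-mode exit delay entry, tagged valid.
+ */
+static inline void ps_delay_entry_example(struct ps_delay_cfg_t *entry)
+{
+	entry->tag = PS_DELAY_TAG;	/* mark the entry as valid */
+	entry->rsv = 0U;
+	entry->exitdelay = 1000U;	/* hypothetical 1000 us exit delay */
+}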
+
+typedef struct ps_delay_cfg_t ps_rtd_delay_cfgs_t[NUM_RTD_PWR_MODES];
+typedef struct ps_delay_cfg_t ps_apd_delay_cfgs_t[NUM_APD_PWR_MODES];
+
+typedef struct ps_rtd_pwr_mode_cfg_t ps_rtd_pwr_mode_cfgs_t[NUM_RTD_PWR_MODES];
+typedef struct ps_apd_pwr_mode_cfg_t ps_apd_pwr_mode_cfgs_t[NUM_APD_PWR_MODES];
+typedef struct ps_pmic_reg_data_cfg_t ps_rtd_pmic_reg_data_cfgs_t[MAX_PMIC_REG_COUNT];
+typedef struct ps_pmic_reg_data_cfg_t ps_apd_pmic_reg_data_cfgs_t[MAX_PMIC_REG_COUNT];
+
+struct ps_pwr_mode_cfg_t {
+ ps_rtd_pwr_mode_cfgs_t ps_rtd_pwr_mode_cfg;
+ ps_rtd_swt_cfgs_t ps_rtd_swt_cfg;
+ ps_apd_pwr_mode_cfgs_t ps_apd_pwr_mode_cfg;
+ ps_apd_swt_cfgs_t ps_apd_swt_cfg;
+ ps_rtd_pmic_reg_data_cfgs_t ps_rtd_pmic_reg_data_cfg;
+ ps_apd_pmic_reg_data_cfgs_t ps_apd_pmic_reg_data_cfg;
+ ps_rtd_delay_cfgs_t ps_rtd_delay_cfg;
+ ps_apd_delay_cfgs_t ps_apd_delay_cfg;
+
+};
+
+#define UPWR_XCP_MIN_ADDR (0x28350000U)
+#define UPWR_XCP_MAX_ADDR (0x2836FFFCU)
+
+struct upwr_reg_access_t {
+ uint32_t addr;
+ uint32_t data;
+ uint32_t mask; /* mask=0 commands read */
+};
+
+typedef upwr_pointer_msg upwr_xcp_access_msg;
+
+/* unions for the shared memory buffer */
+
+typedef union {
+ struct upwr_reg_access_t reg_access;
+} upwr_xcp_union_t;
+
+typedef union {
+ struct {
+ struct ps_rtd_pwr_mode_cfg_t rtd_struct;
+ struct upwr_switch_board_t rtd_switch;
+ struct upwr_mem_switches_t rtd_memory;
+ } rtd_pwr_mode;
+ struct {
+ struct ps_apd_pwr_mode_cfg_t apd_struct;
+ struct upwr_switch_board_t apd_switch;
+ struct upwr_mem_switches_t apd_memory;
+ } apd_pwr_mode;
+} upwr_pwm_union_t;
+
+#define MAX_SG_EXCEPT_MEM_SIZE sizeof(upwr_xcp_union_t)
+#define MAX_SG_PWRMGMT_MEM_SIZE sizeof(upwr_pwm_union_t)
+
+/**
+ * The VOLTM group needs shared memory for the PMIC configuration;
+ * 256 bytes is enough for the PMIC register array.
+ */
+#define MAX_SG_VOLTM_MEM_SIZE 256U
+
+#endif /* UPWR_SOC_DEFS_H */
diff --git a/plat/imx/imx8ulp/xrdc/xrdc_config.h b/plat/imx/imx8ulp/xrdc/xrdc_config.h
new file mode 100644
index 0000000000..d2af55cdd0
--- /dev/null
+++ b/plat/imx/imx8ulp/xrdc/xrdc_config.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2020-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <xrdc.h>
+
+#define SP(X) ((X) << 9)
+#define SU(X) ((X) << 6)
+#define NP(X) ((X) << 3)
+#define NU(X) ((X) << 0)
+
+#define RWX 7
+#define RW 6
+#define R 4
+#define X 1
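+
+/*
+ * Illustrative example (hypothetical policy, not used by the tables below):
+ * an access word granting read/write to secure and normal privileged masters
+ * and read-only to normal user masters, composed from the macros above.
+ */
+#define XRDC_ACC_EXAMPLE	(SP(RW) | SU(RW) | NP(RW) | NU(R))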
+
+struct xrdc_mda_config imx8ulp_mda[] = {
+ { 0, 7, MDA_SA_PT }, /* A core */
+ { 1, 1, MDA_SA_NS }, /* DMA1 */
+ { 2, 1, MDA_SA_NS }, /* USB */
+ { 3, 1, MDA_SA_NS }, /* PXP-> .M10 */
+ { 4, 1, MDA_SA_NS }, /* ENET */
+ { 5, 1, MDA_SA_PT }, /* CAAM */
+ { 6, 1, MDA_SA_NS }, /* USDHC0 */
+ { 7, 1, MDA_SA_NS }, /* USDHC1 */
+ { 8, 1, MDA_SA_NS }, /* USDHC2 */
+ { 9, 2, MDA_SA_NS }, /* HIFI4 */
+ { 10, 3, MDA_SA_NS }, /* GPU3D */
+ { 11, 3, MDA_SA_NS }, /* GPU2D */
+ { 12, 3, MDA_SA_NS }, /* EPDC */
+ { 13, 3, MDA_SA_NS }, /* DCNano */
+ { 14, 3, MDA_SA_NS }, /* ISI */
+ { 15, 3, MDA_SA_NS }, /* PXP->NIC_LPAV.M0 */
+ { 16, 3, MDA_SA_NS }, /* DMA2 */
+};
+
+#ifdef SPD_opteed
+#define TEE_SHM_SIZE 0x400000
+#else
+#define TEE_SHM_SIZE 0x0
+#endif
+
+#if defined(SPD_opteed) || defined(SPD_trusty)
+#define DRAM_MEM_0_START (0x80000000)
+#define DRAM_MEM_0_SIZE (BL32_BASE - 0x80000000)
+
+#define DRAM_MEM_1_START (BL32_BASE)
+#define DRAM_MEM_1_SIZE (BL32_SIZE - TEE_SHM_SIZE)
+
+#ifndef SPD_trusty
+#define DRAM_MEM_2_START (DRAM_MEM_1_START + DRAM_MEM_1_SIZE)
+#define DRAM_MEM_2_SIZE (0x80000000 - DRAM_MEM_1_SIZE - DRAM_MEM_0_SIZE)
+#else
+#define SECURE_HEAP_START (0xA9600000)
+#define SECURE_HEAP_SIZE (0x6000000)
+#define DRAM_MEM_END (0x100000000)
+
+#define DRAM_MEM_2_START (DRAM_MEM_1_START + DRAM_MEM_1_SIZE)
+#define DRAM_MEM_2_SIZE (SECURE_HEAP_START - DRAM_MEM_2_START)
+#define DRAM_MEM_3_START (DRAM_MEM_2_START + DRAM_MEM_2_SIZE)
+#define DRAM_MEM_3_SIZE (SECURE_HEAP_SIZE)
+#define DRAM_MEM_4_START (DRAM_MEM_3_START + DRAM_MEM_3_SIZE)
+#define DRAM_MEM_4_SIZE (DRAM_MEM_END - DRAM_MEM_4_START)
+#endif
+#endif
+
+struct xrdc_mrc_config imx8ulp_mrc[] = {
+ { 0, 0, 0x0, 0x30000, {0, 0, 0, 0, 0, 0, 0, 1}, {0xfff, 0} }, /* ROM1 */
+ { 1, 0, 0x60000000, 0x10000000, {1, 1, 0, 0, 1, 0, 1, 1}, {0xfff, 0} }, /* Flexspi2 */
+ { 2, 0, 0x22020000, 0x40000, {1, 1, 0, 0, 1, 0, 1, 1}, {0xfff, 0} }, /* SRAM2 */
+ { 3, 0, 0x22010000, 0x10000, {1, 1, 0, 0, 1, 0, 1, 1}, {0xfff, 0} }, /* SRAM0 */
+#if defined(SPD_opteed) || defined(SPD_trusty)
+ { 4, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfff, 0} }, /* DRAM for A35, DMA1, USDHC0*/
+ { 4, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfc0, 0} }, /* TEE DRAM for A35, DMA1, USDHC0*/
+ { 4, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfff, 0} }, /* DRAM for A35, DMA1, USDHC0*/
+#ifdef SPD_trusty
+ { 4, 3, DRAM_MEM_3_START, DRAM_MEM_3_SIZE, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfc0, 0} }, /* DRAM for A35, DMA1, USDHC0*/
+ { 4, 4, DRAM_MEM_4_START, DRAM_MEM_4_SIZE, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfff, 0} }, /* DRAM for A35, DMA1, USDHC0*/
+#endif
+
+ { 5, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for NIC_PER */
+ { 5, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfc0, 0} }, /* TEE DRAM for NIC_PER */
+ { 5, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for NIC_PER */
+#ifdef SPD_trusty
+ { 5, 3, DRAM_MEM_3_START, DRAM_MEM_3_SIZE, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfc0, 0} }, /* DRAM for NIC_PER */
+ { 5, 4, DRAM_MEM_4_START, DRAM_MEM_4_SIZE, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for NIC_PER */
+#endif
+
+#ifdef SPD_trusty
+ { 6, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, {1, 1, 0, 2, 1, 0, 1, 1}, {0xfff, 0x93f} }, /* DRAM for LPAV and RTD*/
+ { 6, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfc0, 0} }, /* TEE DRAM for LPAV and RTD*/
+ { 6, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, {1, 1, 0, 2, 1, 0, 1, 1}, {0xfff, 0x93f} }, /* DRAM for LPAV and RTD*/
+ { 6, 3, DRAM_MEM_3_START, DRAM_MEM_3_SIZE, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfc0, 0} }, /* DRAM for LPAV and RTD*/
+ { 6, 4, DRAM_MEM_4_START, DRAM_MEM_4_SIZE, {1, 1, 0, 2, 1, 0, 1, 1}, {0xfff, 0x93f} }, /* DRAM for LPAV and RTD*/
+#else
+ { 6, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfff, 0} }, /* DRAM for LPAV and RTD*/
+ { 6, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfc0, 0} }, /* TEE DRAM for LPAV and RTD*/
+ { 6, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfff, 0} }, /* DRAM for LPAV and RTD*/
+#endif
+#else
+ { 4, 0, 0x80000000, 0x80000000, {0, 1, 0, 0, 0, 0, 0, 1}, {0xfff, 0} }, /* DRAM for A35, DMA1, USDHC0*/
+ { 5, 0, 0x80000000, 0x80000000, {0, 1, 0, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for NIC_PER */
+ { 6, 0, 0x80000000, 0x80000000, {1, 1, 0, 1, 1, 0, 1, 1}, {0xfff, 0} }, /* DRAM for LPAV and RTD*/
+#endif
+ { 7, 0, 0x80000000, 0x10000000, {0, 0, 1, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for HIFI4 */
+ { 7, 1, 0x90000000, 0x10000000, {0, 0, 1, 0, 0, 0, 0, 0}, {0xfff, 0} }, /* DRAM for HIFI4 */
+ { 8, 0, 0x21000000, 0x10000, {1, 1, 1, 1, 1, 0, 1, 1}, {0xfff, 0} }, /* SRAM1 */
+ { 9, 0, 0x1ffc0000, 0xc0000, {0, 0, 0, 0, 0, 0, 0, 0}, {0, 0} }, /* SSRAM for HIFI4 */
+ { 10, 0, 0x1ffc0000, 0xc0000, {0, 0, 0, 0, 0, 0, 0, 0}, {0, 0} }, /* SSRAM for LPAV */
+ { 11, 0, 0x21170000, 0x10000, {0, 0, 1, 0, 0, 0, 0, 2}, {0xfff, SP(RW) | SU(RW) | NP(RW)} }, /* HIFI4 TCM */
+ { 11, 1, 0x21180000, 0x10000, {0, 0, 1, 0, 0, 0, 0, 2}, {SP(RW) | SU(RW) | NP(RW) | NU(RW), SP(RW) | SU(RW) | NP(RW)} }, /* HIFI4 TCM */
+ { 12, 0, 0x2d400000, 0x100000, {0, 0, 0, 0, 0, 0, 0, 1}, {SP(RW) | SU(RW) | NP(RW) | NU(RW), 0} }, /* GIC500 */
+};
+
+struct xrdc_pac_msc_config imx8ulp_pdac[] = {
+ { 0, PAC_SLOT_ALL, {0, 7, 0, 0, 0, 0, 0, 7} }, /* PAC0 */
+ { 0, 44, {0, 7, 7, 0, 0, 0, 0, 7} }, /* PAC0 slot 44 for CGC1 */
+ { 0, 36, {0, 0, 0, 0, 0, 0, 7, 7} }, /* PAC0 slot 36 for CMC1 */
+ { 0, 41, {0, 0, 0, 0, 0, 0, 7, 7} }, /* PAC0 slot 41 for SIM_AD */
+ { 1, PAC_SLOT_ALL, {0, 7, 0, 0, 0, 0, 0, 7} }, /* PAC1 */
+ { 1, 0, {0, 7, 7, 0, 0, 0, 7, 7} }, /* PAC1 slot 0 for PCC4 */
+ { 1, 6, {0, 7, 7, 0, 0, 0, 0, 7} }, /* PAC1 slot 6 for LPUART6 */
+ { 1, 7, {0, 7, 7, 0, 0, 0, 0, 7} }, /* PAC1 slot 7 for LPUART7 */
+ { 1, 9, {0, 7, 7, 7, 0, 0, 0, 7} }, /* SAI5 for HIFI4 and eDMA2 */
+ { 1, 12, {0, 7, 7, 0, 0, 0, 7, 7} }, /* PAC1 slot 12 for IOMUXC1 */
+ { 2, PAC_SLOT_ALL, {7, 7, 7, 7, 0, 0, 7, 7} }, /* PAC2 */
+};
+
+struct xrdc_pac_msc_config imx8ulp_msc[] = {
+ { 0, 0, {0, 0, 0, 0, 0, 0, 7, 7} }, /* MSC0 GPIOE */
+ { 0, 1, {0, 0, 0, 0, 0, 0, 7, 7} }, /* MSC0 GPIOF */
+ { 1, MSC_SLOT_ALL, {0, 0, 0, 0, 0, 0, 7, 7} }, /* MSC1 GPIOD */
+ { 2, MSC_SLOT_ALL, {0, 0, 0, 0, 0, 0, 7, 7} }, /* MSC2 GPU3D/2D/DCNANO/DDR registers */
+};
diff --git a/plat/imx/imx8ulp/xrdc/xrdc_core.c b/plat/imx/imx8ulp/xrdc/xrdc_core.c
new file mode 100644
index 0000000000..d022e4ca93
--- /dev/null
+++ b/plat/imx/imx8ulp/xrdc/xrdc_core.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2020-2024 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/debug.h>
+#include <lib/mmio.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include "xrdc_config.h"
+
+#define XRDC_ADDR 0x292f0000
+#define MRC_OFFSET 0x2000
+#define MRC_STEP 0x200
+
+#define XRDC_MGR_PAC_ID U(0)
+#define XRDC_MGR_PAC_SLOT U(47)
+
+enum xrdc_comp_type {
+ MDA_TYPE = (1 << 16),
+ MRC_TYPE = (2 << 16),
+ PAC_TYPE = (3 << 16),
+ MSC_TYPE = (4 << 16),
+};
+
+enum xrdc_pd_type {
+ XRDC_AD_PD,
+ XRDC_HIFI_PD,
+ XRDC_AV_PD,
+};
+
+#define XRDC_TYPE_MASK (0x7 << 16)
+#define XRDC_ID_MASK 0xFFFF
+#define XRDC_ID(id) ((id) & XRDC_ID_MASK)
+
+typedef bool (*xrdc_check_func)(enum xrdc_comp_type type, uint16_t id);
+
+/* Accessing the XRDC components below requires power switch PS 8 and the
+ * HIFI clocks to be enabled, and HIFI to be released first.
+ */
+uint32_t hifi_xrdc_list[] = {
+ (MDA_TYPE | XRDC_ID(9)),
+ (MRC_TYPE | XRDC_ID(7)),
+ (MRC_TYPE | XRDC_ID(9)),
+ (MRC_TYPE | XRDC_ID(11)),
+};
+
+/* Accessing the XRDC components below requires power switch PS 16 to be enabled first */
+uint32_t av_periph_xrdc_list[] = {
+ (MDA_TYPE | XRDC_ID(10)),
+ (MDA_TYPE | XRDC_ID(11)),
+ (MDA_TYPE | XRDC_ID(12)),
+ (MDA_TYPE | XRDC_ID(13)),
+ (MDA_TYPE | XRDC_ID(14)),
+ (MDA_TYPE | XRDC_ID(15)),
+ (MDA_TYPE | XRDC_ID(16)),
+
+ (PAC_TYPE | XRDC_ID(2)),
+
+ (MRC_TYPE | XRDC_ID(6)),
+ (MRC_TYPE | XRDC_ID(8)),
+ (MRC_TYPE | XRDC_ID(10)),
+
+ (MSC_TYPE | XRDC_ID(1)),
+ (MSC_TYPE | XRDC_ID(2)),
+};
+
+uint32_t imx8ulp_pac_slots[] = {
+ 61, 23, 53
+};
+
+uint32_t imx8ulp_msc_slots[] = {
+ 2, 1, 7
+};
+
+static int xrdc_config_mrc_w0_w1(uint32_t mrc_con, uint32_t region, uint32_t w0, uint32_t size)
+{
+ uint32_t w0_addr, w1_addr;
+
+ w0_addr = XRDC_ADDR + MRC_OFFSET + mrc_con * 0x200 + region * 0x20;
+ w1_addr = w0_addr + 4;
+
+ if ((size % 32) != 0) {
+ return -EINVAL;
+ }
+
+ mmio_write_32(w0_addr, w0 & ~0x1f);
+ mmio_write_32(w1_addr, w0 + size - 1);
+
+ return 0;
+}
+
+static int xrdc_config_mrc_w2(uint32_t mrc_con, uint32_t region, uint32_t dxsel_all)
+{
+ uint32_t w2_addr;
+
+ w2_addr = XRDC_ADDR + MRC_OFFSET + mrc_con * 0x200 + region * 0x20 + 0x8;
+
+ mmio_write_32(w2_addr, dxsel_all);
+
+ return 0;
+}
+
+static int xrdc_config_mrc_w3_w4(uint32_t mrc_con, uint32_t region, uint32_t w3, uint32_t w4)
+{
+ uint32_t w3_addr = XRDC_ADDR + MRC_OFFSET + mrc_con * 0x200 + region * 0x20 + 0xC;
+ uint32_t w4_addr = w3_addr + 4;
+
+ mmio_write_32(w3_addr, w3);
+ mmio_write_32(w4_addr, w4);
+
+ return 0;
+}
+
+static int xrdc_config_pac(uint32_t pac, uint32_t index, uint32_t dxacp)
+{
+ uint32_t w0_addr;
+ uint32_t val;
+
+ if (pac > 2U) {
+ return -EINVAL;
+ }
+
+ /* Skip the PAC slot for XRDC MGR, use Sentinel configuration */
+ if (pac == XRDC_MGR_PAC_ID && index == XRDC_MGR_PAC_SLOT) {
+ return 0;
+ }
+
+ w0_addr = XRDC_ADDR + 0x1000 + 0x400 * pac + 0x8 * index;
+
+ mmio_write_32(w0_addr, dxacp);
+
+ val = mmio_read_32(w0_addr + 4);
+ mmio_write_32(w0_addr + 4, val | BIT_32(31));
+
+ return 0;
+}
+
+static int xrdc_config_msc(uint32_t msc, uint32_t index, uint32_t dxacp)
+{
+ uint32_t w0_addr;
+ uint32_t val;
+
+ if (msc > 2) {
+ return -EINVAL;
+ }
+
+ w0_addr = XRDC_ADDR + 0x4000 + 0x400 * msc + 0x8 * index;
+
+ mmio_write_32(w0_addr, dxacp);
+
+ val = mmio_read_32(w0_addr + 4);
+ mmio_write_32(w0_addr + 4, val | BIT_32(31));
+
+ return 0;
+}
+
+static int xrdc_config_mda(uint32_t mda_con, uint32_t dom, enum xrdc_mda_sa sa)
+{
+ uint32_t w0_addr;
+ uint32_t val;
+
+ w0_addr = XRDC_ADDR + 0x800 + mda_con * 0x20;
+
+ val = mmio_read_32(w0_addr);
+
+ if (val & BIT_32(29)) {
+ mmio_write_32(w0_addr, (val & (~0xFF)) | dom |
+ BIT_32(31) | 0x20 | ((sa & 0x3) << 6));
+ } else {
+ mmio_write_32(w0_addr, dom | BIT_32(31));
+ mmio_write_32(w0_addr + 0x4, dom | BIT_32(31));
+ }
+
+ return 0;
+}
+
+static bool xrdc_check_pd(enum xrdc_comp_type type,
+ uint16_t id, enum xrdc_pd_type pd)
+{
+ unsigned int i, size;
+ uint32_t item = type | XRDC_ID(id);
+ uint32_t *list;
+
+ if (pd == XRDC_HIFI_PD) {
+ size = ARRAY_SIZE(hifi_xrdc_list);
+ list = hifi_xrdc_list;
+ } else if (pd == XRDC_AV_PD) {
+ size = ARRAY_SIZE(av_periph_xrdc_list);
+ list = av_periph_xrdc_list;
+ } else {
+ return false;
+ }
+
+ for (i = 0U; i < size; i++) {
+ if (item == list[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool xrdc_check_lpav(enum xrdc_comp_type type, uint16_t id)
+{
+ return xrdc_check_pd(type, id, XRDC_AV_PD);
+}
+
+static bool xrdc_check_hifi(enum xrdc_comp_type type, uint16_t id)
+{
+ return xrdc_check_pd(type, id, XRDC_HIFI_PD);
+}
+
+static bool xrdc_check_ad(enum xrdc_comp_type type, uint16_t id)
+{
+ return (!xrdc_check_pd(type, id, XRDC_HIFI_PD) &&
+ !xrdc_check_pd(type, id, XRDC_AV_PD));
+}
+
+static int xrdc_apply_config(xrdc_check_func check_func)
+{
+ unsigned int i, j;
+ uint32_t val;
+
+ for (i = 0U; i < ARRAY_SIZE(imx8ulp_mda); i++) {
+ if (check_func(MDA_TYPE, imx8ulp_mda[i].mda_id)) {
+ xrdc_config_mda(imx8ulp_mda[i].mda_id,
+ imx8ulp_mda[i].did, imx8ulp_mda[i].sa);
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(imx8ulp_mrc); i++) {
+ if (check_func(MRC_TYPE, imx8ulp_mrc[i].mrc_id)) {
+ xrdc_config_mrc_w0_w1(imx8ulp_mrc[i].mrc_id,
+ imx8ulp_mrc[i].region_id,
+ imx8ulp_mrc[i].region_start,
+ imx8ulp_mrc[i].region_size);
+
+ val = 0;
+ for (j = 0U; j < DID_MAX; j++) {
+ val |= imx8ulp_mrc[i].dsel[j] << (3 * j);
+ }
+
+ xrdc_config_mrc_w2(imx8ulp_mrc[i].mrc_id, imx8ulp_mrc[i].region_id, val);
+ xrdc_config_mrc_w3_w4(imx8ulp_mrc[i].mrc_id, imx8ulp_mrc[i].region_id,
+ 0, imx8ulp_mrc[i].accset[0] | (imx8ulp_mrc[i].accset[1] << 16) | BIT_32(31));
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(imx8ulp_pdac); i++) {
+ if (check_func(PAC_TYPE, imx8ulp_pdac[i].pac_msc_id)) {
+ val = 0;
+ for (j = 0U; j < DID_MAX; j++) {
+ val |= imx8ulp_pdac[i].dsel[j] << (3 * j);
+ }
+
+ if (imx8ulp_pdac[i].slot_id == PAC_SLOT_ALL) {
+ /* Apply to all slots*/
+ for (j = 0U; j < imx8ulp_pac_slots[imx8ulp_pdac[i].pac_msc_id]; j++) {
+ xrdc_config_pac(imx8ulp_pdac[i].pac_msc_id, j, val);
+ }
+ } else {
+ if (imx8ulp_pdac[i].slot_id >= imx8ulp_pac_slots[imx8ulp_pdac[i].pac_msc_id]) {
+ return -EINVAL;
+ }
+
+ xrdc_config_pac(imx8ulp_pdac[i].pac_msc_id, imx8ulp_pdac[i].slot_id, val);
+ }
+ }
+ }
+
+ for (i = 0U; i < ARRAY_SIZE(imx8ulp_msc); i++) {
+ if (check_func(MSC_TYPE, imx8ulp_msc[i].pac_msc_id)) {
+ val = 0;
+ for (j = 0U; j < DID_MAX; j++) {
+ val |= imx8ulp_msc[i].dsel[j] << (3 * j);
+ }
+
+ if (imx8ulp_msc[i].slot_id == MSC_SLOT_ALL) {
+ /* Apply to all slots*/
+ for (j = 0U; j < imx8ulp_msc_slots[imx8ulp_msc[i].pac_msc_id]; j++) {
+ xrdc_config_msc(imx8ulp_msc[i].pac_msc_id, j, val);
+ }
+ } else {
+ if (imx8ulp_msc[i].slot_id >= imx8ulp_msc_slots[imx8ulp_msc[i].pac_msc_id]) {
+ return -EINVAL;
+ }
+
+ xrdc_config_msc(imx8ulp_msc[i].pac_msc_id, imx8ulp_msc[i].slot_id, val);
+ }
+ }
+ }
+
+ return 0;
+}
+
+int xrdc_apply_lpav_config(void)
+{
+ /* Configure PAC2 to allow to access PCC5 */
+ xrdc_config_pac(2, 39, 0xe00000);
+
+ /* Enable the eDMA2 MP clock for MDA16 access */
+ mmio_write_32(IMX_PCC5_BASE + 0x0, 0xc0000000);
+ return xrdc_apply_config(xrdc_check_lpav);
+}
+
+int xrdc_apply_hifi_config(void)
+{
+ return xrdc_apply_config(xrdc_check_hifi);
+}
+
+int xrdc_apply_apd_config(void)
+{
+ return xrdc_apply_config(xrdc_check_ad);
+}
+
+void xrdc_enable(void)
+{
+ mmio_write_32(XRDC_ADDR, BIT(14) | BIT(15) | BIT(0));
+}
diff --git a/plat/imx/imx93/aarch64/plat_helpers.S b/plat/imx/imx93/aarch64/plat_helpers.S
new file mode 100644
index 0000000000..9987a53276
--- /dev/null
+++ b/plat/imx/imx93/aarch64/plat_helpers.S
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <cortex_a55.h>
+
+#include <platform_def.h>
+
+ .globl plat_is_my_cpu_primary
+ .globl plat_my_core_pos
+ .globl plat_calc_core_pos
+ .globl platform_mem_init
+
+ /* ------------------------------------------------------
+ * Helper macro that reads the part number of the current
+ * CPU and jumps to the given label if it matches the CPU
+ * MIDR provided.
+ *
+ * Clobbers x0.
+ * ------------------------------------------------------
+ */
+ .macro jump_if_cpu_midr _cpu_midr, _label
+
+ mrs x0, midr_el1
+ ubfx x0, x0, MIDR_PN_SHIFT, #12
+ cmp w0, #((\_cpu_midr >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+ b.eq \_label
+
+ .endm
+
+ /* ----------------------------------------------
+ * unsigned int plat_is_my_cpu_primary(void);
+ * This function checks if this is the primary CPU
+ * ----------------------------------------------
+ */
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ mov_imm x1, MPIDR_AFFINITY_MASK
+ and x0, x0, x1
+ cmp x0, #PLAT_PRIMARY_CPU
+ cset x0, eq
+ ret
+endfunc plat_is_my_cpu_primary
+
+ /* ----------------------------------------------
+ * unsigned int plat_my_core_pos(void)
+ * This function uses the plat_calc_core_pos()
+ * to get the index of the calling CPU.
+ * ----------------------------------------------
+ */
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ mov x1, #MPIDR_AFFLVL_MASK
+ and x0, x1, x0, lsr #MPIDR_AFF1_SHIFT
+ ret
+endfunc plat_my_core_pos
+
+ /*
+ * unsigned int plat_calc_core_pos(uint64_t mpidr)
+	 * Helper function to calculate the core position
+	 * from the MPIDR value.
+ */
+func plat_calc_core_pos
+ mov x1, #MPIDR_AFFLVL_MASK
+ and x0, x1, x0, lsr #MPIDR_AFF1_SHIFT
+ ret
+endfunc plat_calc_core_pos
+
+func platform_mem_init
+ ret
+endfunc platform_mem_init
diff --git a/plat/imx/imx93/imx93_bl31_setup.c b/plat/imx/imx93/imx93_bl31_setup.c
new file mode 100644
index 0000000000..8458f6c13c
--- /dev/null
+++ b/plat/imx/imx93/imx93_bl31_setup.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <context.h>
+#include <drivers/console.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/nxp/trdc/imx_trdc.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/mmio.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat/common/platform.h>
+
+#include <imx8_lpuart.h>
+#include <plat_imx8.h>
+#include <platform_def.h>
+
+#define MAP_BL31_TOTAL \
+ MAP_REGION_FLAT(BL31_BASE, BL31_LIMIT - BL31_BASE, MT_MEMORY | MT_RW | MT_SECURE)
+#define MAP_BL31_RO \
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE, MT_MEMORY | MT_RO | MT_SECURE)
+
+static const mmap_region_t imx_mmap[] = {
+ AIPS1_MAP, AIPS2_MAP, AIPS4_MAP, GIC_MAP,
+ TRDC_A_MAP, TRDC_W_MAP, TRDC_M_MAP,
+ TRDC_N_MAP,
+ {0},
+};
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+/* get SPSR for BL33 entry */
+static uint32_t get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned long mode;
+ uint32_t spsr;
+
+	/* figure out in which mode we will enter the non-secure world */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_t console;
+
+ console_lpuart_register(IMX_LPUART_BASE, IMX_BOOT_UART_CLK_IN_HZ,
+ IMX_CONSOLE_BAUDRATE, &console);
+
+ /* This console is only used for boot stage */
+ console_set_scope(&console, CONSOLE_FLAG_BOOT);
+
+ /*
+ * tell BL3-1 where the non-secure software image is located
+ * and the entry state information.
+ */
+ bl33_image_ep_info.pc = PLAT_NS_IMAGE_OFFSET;
+ bl33_image_ep_info.spsr = get_spsr_for_bl33_entry();
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+
+#if defined(SPD_opteed)
+ /* Populate entry point information for BL32 */
+ SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
+ SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+ bl32_image_ep_info.pc = BL32_BASE;
+ bl32_image_ep_info.spsr = 0;
+
+ /* Pass TEE base and size to bl33 */
+ bl33_image_ep_info.args.arg1 = BL32_BASE;
+ bl33_image_ep_info.args.arg2 = BL32_SIZE;
+
+ /* Make sure memory is clean */
+ mmio_write_32(BL32_FDT_OVERLAY_ADDR, 0);
+ bl33_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+ bl32_image_ep_info.args.arg3 = BL32_FDT_OVERLAY_ADDR;
+#endif
+}
+
+void bl31_plat_arch_setup(void)
+{
+ /* no coherence memory support on i.MX9 */
+ const mmap_region_t bl_regions[] = {
+ MAP_BL31_TOTAL,
+ MAP_BL31_RO,
+ };
+
+ /* Assign all the GPIO pins to non-secure world by default */
+ mmio_write_32(GPIO2_BASE + 0x10, 0xffffffff);
+ mmio_write_32(GPIO2_BASE + 0x14, 0x3);
+ mmio_write_32(GPIO2_BASE + 0x18, 0xffffffff);
+ mmio_write_32(GPIO2_BASE + 0x1c, 0x3);
+
+ mmio_write_32(GPIO3_BASE + 0x10, 0xffffffff);
+ mmio_write_32(GPIO3_BASE + 0x14, 0x3);
+ mmio_write_32(GPIO3_BASE + 0x18, 0xffffffff);
+ mmio_write_32(GPIO3_BASE + 0x1c, 0x3);
+
+ mmio_write_32(GPIO4_BASE + 0x10, 0xffffffff);
+ mmio_write_32(GPIO4_BASE + 0x14, 0x3);
+ mmio_write_32(GPIO4_BASE + 0x18, 0xffffffff);
+ mmio_write_32(GPIO4_BASE + 0x1c, 0x3);
+
+ mmio_write_32(GPIO1_BASE + 0x10, 0xffffffff);
+ mmio_write_32(GPIO1_BASE + 0x14, 0x3);
+ mmio_write_32(GPIO1_BASE + 0x18, 0xffffffff);
+ mmio_write_32(GPIO1_BASE + 0x1c, 0x3);
+
+ setup_page_tables(bl_regions, imx_mmap);
+ enable_mmu_el3(0);
+
+ /* trdc must be initialized */
+ trdc_config();
+}
+
+void bl31_platform_setup(void)
+{
+ generic_delay_timer_init();
+
+ plat_gic_driver_init();
+ plat_gic_init();
+}
+
+void bl31_plat_runtime_setup(void)
+{
+ console_switch_state(CONSOLE_FLAG_RUNTIME);
+}
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(unsigned int type)
+{
+ if (type == NON_SECURE) {
+ return &bl33_image_ep_info;
+ }
+
+ if (type == SECURE) {
+ return &bl32_image_ep_info;
+ }
+
+ return NULL;
+}
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return COUNTER_FREQUENCY;
+}
diff --git a/plat/imx/imx93/imx93_psci.c b/plat/imx/imx93/imx93_psci.c
new file mode 100644
index 0000000000..5e1fa958d1
--- /dev/null
+++ b/plat/imx/imx93/imx93_psci.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+
+#include <plat_imx8.h>
+#include <pwr_ctrl.h>
+
+#define CORE_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL0])
+#define CLUSTER_PWR_STATE(state) ((state)->pwr_domain_state[MPIDR_AFFLVL1])
+#define SYSTEM_PWR_STATE(state) ((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])
+
+/* platform secure warm boot entry */
+static uintptr_t secure_entrypoint;
+
+static bool boot_stage = true;
+
+int imx_validate_ns_entrypoint(uintptr_t ns_entrypoint)
+{
+ /* The non-secure entrypoint should be in RAM space */
+ if (ns_entrypoint < PLAT_NS_IMAGE_OFFSET) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+int imx_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
+ int pwr_type = psci_get_pstate_type(power_state);
+ int state_id = psci_get_pstate_id(power_state);
+
+ if (pwr_lvl > PLAT_MAX_PWR_LVL) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ if (pwr_type == PSTATE_TYPE_STANDBY) {
+ CORE_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ }
+
+ if (pwr_type == PSTATE_TYPE_POWERDOWN && state_id == 0x33) {
+ CORE_PWR_STATE(req_state) = PLAT_MAX_OFF_STATE;
+ CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+void imx_set_cpu_boot_entry(unsigned int core_id, uint64_t boot_entry)
+{
+ /* set the cpu core reset entry: BLK_CTRL_S */
+ mmio_write_32(BLK_CTRL_S_BASE + CA55_RVBADDR0_L + core_id * 8, boot_entry >> 2);
+}
+
+int imx_pwr_domain_on(u_register_t mpidr)
+{
+ unsigned int core_id;
+
+ core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ imx_set_cpu_boot_entry(core_id, secure_entrypoint);
+
+ /*
+	 * When the core boots up for the first time, it is already ON after SoC POR,
+	 * so 'SW_WAKEUP' cannot work; instead, toggle the core's reset and then
+	 * release the core from cpu_wait.
+ */
+ if (boot_stage) {
+ /* assert CPU core SW reset */
+ mmio_clrbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
+ /* deassert CPU core SW reset */
+ mmio_setbits_32(SRC_SLICE(SRC_A55C0 + core_id) + 0x24, BIT(2) | BIT(0));
+ /* release the cpuwait to kick the cpu */
+ mmio_clrbits_32(BLK_CTRL_S_BASE + CA55_CPUWAIT, BIT(core_id));
+ } else {
+ /* assert the CMC MISC SW WAKEUP BIT to kick the offline core */
+ gpc_assert_sw_wakeup(CPU_A55C0 + core_id);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+void imx_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ plat_gic_pcpu_init();
+ plat_gic_cpuif_enable();
+
+ /* below config is ok both for boot & hotplug */
+ /* clear the CPU power mode */
+ gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
+ /* clear the SW wakeup */
+ gpc_deassert_sw_wakeup(CPU_A55C0 + core_id);
+ /* switch to GIC wakeup source */
+ gpc_select_wakeup_gic(CPU_A55C0 + core_id);
+
+ if (boot_stage) {
+ /* SRC config */
+ /* config the MEM LPM */
+ src_mem_lpm_en(SRC_A55P0_MEM + core_id, MEM_OFF);
+		/* LPM config: the MIX is ON only in RUN mode for its own domain */
+ src_mix_set_lpm(SRC_A55C0 + core_id, core_id, CM_MODE_WAIT);
+		/* whitelist config: only enable its own domain */
+ src_authen_config(SRC_A55C0 + core_id, 1 << core_id, 0x1);
+
+ boot_stage = false;
+ }
+}
+
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+ unsigned int i;
+
+ plat_gic_cpuif_disable();
+ write_clusterpwrdn(DSU_CLUSTER_PWR_OFF);
+
+ /*
+	 * mask all GPC IRQ wakeup sources to make sure no IRQ can wake up this core,
+	 * as we need to use SW_WAKEUP for hotplug purposes
+ */
+ for (i = 0U; i < IMR_NUM; i++) {
+ gpc_set_irq_mask(CPU_A55C0 + core_id, i, 0xffffffff);
+ }
+ /* switch to GPC wakeup source */
+ gpc_select_wakeup_raw_irq(CPU_A55C0 + core_id);
+ /* config the target mode to suspend */
+ gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_SUSPEND);
+}
+
+void imx_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ /* do cpu level config */
+ if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ plat_gic_cpuif_disable();
+ imx_set_cpu_boot_entry(core_id, secure_entrypoint);
+ /* config the target mode to WAIT */
+ gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_WAIT);
+ }
+
+ /* do cluster level config */
+ if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+ /* config the A55 cluster target mode to WAIT */
+ gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_WAIT);
+
+ /* config DSU for cluster power down with L3 MEM RET */
+ if (is_local_state_retn(CLUSTER_PWR_STATE(target_state))) {
+ write_clusterpwrdn(DSU_CLUSTER_PWR_OFF | BIT(1));
+ }
+ }
+}
+
+void imx_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ /* cluster level */
+ if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+ /* set the cluster's target mode to RUN */
+ gpc_set_cpu_mode(CPU_A55_PLAT, CM_MODE_RUN);
+ }
+
+ /* do core level */
+ if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ /* set A55 CORE's power mode to RUN */
+ gpc_set_cpu_mode(CPU_A55C0 + core_id, CM_MODE_RUN);
+ plat_gic_cpuif_enable();
+ }
+}
+
+void imx_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ unsigned int i;
+
+ for (i = IMX_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++) {
+ req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
+ }
+
+ SYSTEM_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+ CLUSTER_PWR_STATE(req_state) = PLAT_MAX_RET_STATE;
+}
+
+void __dead2 imx_system_reset(void)
+{
+ mmio_write_32(WDOG3_BASE + WDOG_CNT, 0xd928c520);
+ while ((mmio_read_32(WDOG3_BASE + WDOG_CS) & WDOG_CS_ULK) == 0U) {
+ ;
+ }
+
+ mmio_write_32(WDOG3_BASE + WDOG_TOVAL, 0x10);
+ mmio_write_32(WDOG3_BASE + WDOG_CS, 0x21e3);
+
+ while (1) {
+ wfi();
+ }
+}
+
+void __dead2 imx_system_off(void)
+{
+ mmio_setbits_32(BBNSM_BASE + BBNSM_CTRL, BBNSM_DP_EN | BBNSM_TOSP);
+
+ while (1) {
+ wfi();
+ }
+}
+
+static const plat_psci_ops_t imx_plat_psci_ops = {
+ .validate_ns_entrypoint = imx_validate_ns_entrypoint,
+ .validate_power_state = imx_validate_power_state,
+ .pwr_domain_on = imx_pwr_domain_on,
+ .pwr_domain_off = imx_pwr_domain_off,
+ .pwr_domain_on_finish = imx_pwr_domain_on_finish,
+ .pwr_domain_suspend = imx_pwr_domain_suspend,
+ .pwr_domain_suspend_finish = imx_pwr_domain_suspend_finish,
+ .get_sys_suspend_power_state = imx_get_sys_suspend_power_state,
+ .system_reset = imx_system_reset,
+ .system_off = imx_system_off,
+};
+
+/* export the platform specific psci ops */
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const plat_psci_ops_t **psci_ops)
+{
+ /* sec_entrypoint is used for warm reset */
+ secure_entrypoint = sec_entrypoint;
+ imx_set_cpu_boot_entry(0, sec_entrypoint);
+
+ pwr_sys_init();
+
+ *psci_ops = &imx_plat_psci_ops;
+
+ return 0;
+}
diff --git a/plat/imx/imx93/include/platform_def.h b/plat/imx/imx93/include/platform_def.h
new file mode 100644
index 0000000000..7efbf1c632
--- /dev/null
+++ b/plat/imx/imx93/include/platform_def.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <lib/utils_def.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define PLATFORM_STACK_SIZE 0xB00
+#define CACHE_WRITEBACK_GRANULE 64
+
+#define PLAT_PRIMARY_CPU U(0x0)
+#define PLATFORM_MAX_CPU_PER_CLUSTER U(2)
+#define PLATFORM_CLUSTER_COUNT U(1)
+#define PLATFORM_CLUSTER0_CORE_COUNT U(2)
+#define PLATFORM_CORE_COUNT U(2)
+
+#define IMX_PWR_LVL0 MPIDR_AFFLVL0
+
+#define PWR_DOMAIN_AT_MAX_LVL U(1)
+#define PLAT_MAX_PWR_LVL U(2)
+#define PLAT_MAX_OFF_STATE U(4)
+#define PLAT_MAX_RET_STATE U(2)
+
+#define BL31_BASE U(0x204E0000)
+#define BL31_LIMIT U(0x20520000)
+
+/* non-secure uboot base */
+/* TODO */
+#define PLAT_NS_IMAGE_OFFSET U(0x80200000)
+#define BL32_FDT_OVERLAY_ADDR (PLAT_NS_IMAGE_OFFSET + 0x3000000)
+
+/* GICv4 base address */
+#define PLAT_GICD_BASE U(0x48000000)
+#define PLAT_GICR_BASE U(0x48040000)
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32)
+
+#define MAX_XLAT_TABLES 8
+#define MAX_MMAP_REGIONS 16
+
+#define IMX_LPUART_BASE U(0x44380000)
+#define IMX_BOOT_UART_CLK_IN_HZ U(24000000) /* Select 24MHz oscillator */
+#define IMX_CONSOLE_BAUDRATE 115200
+
+#define AIPSx_SIZE U(0x800000)
+#define AIPS1_BASE U(0x44000000)
+#define AIPS2_BASE U(0x42000000)
+#define AIPS3_BASE U(0x42800000)
+#define AIPS4_BASE U(0x49000000)
+#define GPIO1_BASE U(0x47400000)
+#define GPIO2_BASE U(0x43810000)
+#define GPIO3_BASE U(0x43820000)
+#define GPIO4_BASE U(0x43830000)
+
+#define TRDC_A_BASE U(0x44270000)
+#define TRDC_W_BASE U(0x42460000)
+#define TRDC_M_BASE U(0x42810000)
+#define TRDC_N_BASE U(0x49010000)
+#define TRDC_x_SIZE U(0x20000)
+
+#define WDOG3_BASE U(0x42490000)
+#define WDOG_CS U(0x0)
+#define WDOG_CS_ULK BIT(11)
+#define WDOG_CNT U(0x4)
+#define WDOG_TOVAL U(0x8)
+
+#define BBNSM_BASE U(0x44440000)
+#define BBNSM_CTRL U(0x8)
+#define BBNSM_DP_EN BIT(24)
+#define BBNSM_TOSP BIT(25)
+
+#define SRC_BASE U(0x44460000)
+#define GPC_BASE U(0x44470000)
+#define BLK_CTRL_S_BASE U(0x444F0000)
+#define S400_MU_BASE U(0x47520000)
+
+/* system memory map define */
+#define AIPS2_MAP MAP_REGION_FLAT(AIPS2_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW | MT_NS)
+#define AIPS1_MAP MAP_REGION_FLAT(AIPS1_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW)
+#define AIPS4_MAP MAP_REGION_FLAT(AIPS4_BASE, AIPSx_SIZE, MT_DEVICE | MT_RW | MT_NS)
+#define GIC_MAP MAP_REGION_FLAT(PLAT_GICD_BASE, 0x200000, MT_DEVICE | MT_RW)
+#define TRDC_A_MAP MAP_REGION_FLAT(TRDC_A_BASE, TRDC_x_SIZE, MT_DEVICE | MT_RW)
+#define TRDC_W_MAP MAP_REGION_FLAT(TRDC_W_BASE, TRDC_x_SIZE, MT_DEVICE | MT_RW)
+#define TRDC_M_MAP MAP_REGION_FLAT(TRDC_M_BASE, TRDC_x_SIZE, MT_DEVICE | MT_RW)
+#define TRDC_N_MAP MAP_REGION_FLAT(TRDC_N_BASE, TRDC_x_SIZE, MT_DEVICE | MT_RW)
+
+#define COUNTER_FREQUENCY 24000000
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/imx/imx93/include/pwr_ctrl.h b/plat/imx/imx93/include/pwr_ctrl.h
new file mode 100644
index 0000000000..9bcf486819
--- /dev/null
+++ b/plat/imx/imx93/include/pwr_ctrl.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PWR_CTRL_H
+#define PWR_CTRL_H
+
+#include <stdbool.h>
+
+#include <lib/mmio.h>
+
+#include <platform_def.h>
+
+/*******************************************************************************
+ * GPC definitions & declarations
+ ******************************************************************************/
+/* GPC GLOBAL */
+#define GPC_GLOBAL_BASE U(GPC_BASE + 0x4000)
+#define GPC_AUTHEN_CTRL U(0x4)
+#define GPC_DOMAIN U(0x10)
+#define GPC_MASTER U(0x1c)
+#define GPC_SYS_SLEEP U(0x40)
+#define PMIC_CTRL U(0x100)
+#define PMIC_PRE_DLY_CTRL U(0x104)
+#define PMIC_STBY_ACK_CTRL U(0x108)
+#define GPC_ROSC_CTRL U(0x200)
+#define GPC_AON_MEM_CTRL U(0x204)
+#define GPC_EFUSE_CTRL U(0x208)
+
+#define FORCE_CPUx_DISABLE(x) (1 << (16 + (x)))
+#define PMIC_STBY_EN BIT(0)
+#define ROSC_OFF_EN BIT(0)
+
+/* GPC CPU_CTRL */
+#define CM_SLICE(x) (GPC_BASE + 0x800 * (x))
+#define CM_AUTHEN_CTRL U(0x4)
+#define CM_MISC U(0xc)
+#define CM_MODE_CTRL U(0x10)
+#define CM_IRQ_WAKEUP_MASK0 U(0x100)
+#define CM_SYS_SLEEP_CTRL U(0x380)
+#define IMR_NUM U(8)
+
+/* CM_MISC */
+#define SLEEP_HOLD_EN BIT(1)
+#define IRQ_MUX BIT(5)
+#define SW_WAKEUP BIT(6)
+
+/* CM_SYS_SLEEP_CTRL */
+#define SS_WAIT BIT(0)
+#define SS_STOP BIT(1)
+#define SS_SUSPEND BIT(2)
+
+#define CM_MODE_RUN U(0x0)
+#define CM_MODE_WAIT U(0x1)
+#define CM_MODE_STOP U(0x2)
+#define CM_MODE_SUSPEND U(0x3)
+
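+/*
+ * Each domain's LPM setting is a 4-bit field, packed eight per 32-bit
+ * LPM_SETTINGn register, hence the ((d) % 8) * 4 shift below.
+ */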
+#define LPM_SETTING(d, m) ((m) << (((d) % 8) * 4))
+
+enum gpc_cmc_slice {
+ CPU_M33,
+ CPU_A55C0,
+ CPU_A55C1,
+ CPU_A55_PLAT,
+};
+
+/* set gpc domain assignment */
+static inline void gpc_assign_domains(unsigned int domains)
+{
+ mmio_write_32(GPC_GLOBAL_BASE + GPC_DOMAIN, domains);
+}
+
+/* force a cpu into sleep status */
+static inline void gpc_force_cpu_suspend(unsigned int cpu)
+{
+ mmio_setbits_32(GPC_GLOBAL_BASE + GPC_SYS_SLEEP, FORCE_CPUx_DISABLE(cpu));
+}
+
+static inline void gpc_pmic_stby_en(bool en)
+{
+ mmio_write_32(GPC_GLOBAL_BASE + PMIC_CTRL, en ? 1 : 0);
+}
+
+static inline void gpc_rosc_off(bool off)
+{
+ mmio_write_32(GPC_GLOBAL_BASE + GPC_ROSC_CTRL, off ? 1 : 0);
+}
+
+static inline void gpc_set_cpu_mode(unsigned int cpu, unsigned int mode)
+{
+ mmio_write_32(CM_SLICE(cpu) + CM_MODE_CTRL, mode);
+}
+
+static inline void gpc_select_wakeup_gic(unsigned int cpu)
+{
+ mmio_setbits_32(CM_SLICE(cpu) + CM_MISC, IRQ_MUX);
+}
+
+static inline void gpc_select_wakeup_raw_irq(unsigned int cpu)
+{
+ mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, IRQ_MUX);
+}
+
+static inline void gpc_assert_sw_wakeup(unsigned int cpu)
+{
+ mmio_setbits_32(CM_SLICE(cpu) + CM_MISC, SW_WAKEUP);
+}
+
+static inline void gpc_deassert_sw_wakeup(unsigned int cpu)
+{
+ mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, SW_WAKEUP);
+}
+
+static inline void gpc_clear_cpu_sleep_hold(unsigned int cpu)
+{
+ mmio_clrbits_32(CM_SLICE(cpu) + CM_MISC, SLEEP_HOLD_EN);
+}
+
+static inline void gpc_set_irq_mask(unsigned int cpu, unsigned int idx, uint32_t mask)
+{
+ mmio_write_32(CM_SLICE(cpu) + idx * 0x4 + CM_IRQ_WAKEUP_MASK0, mask);
+}
+
+/*******************************************************************************
+ * SRC definitions & declarations
+ ******************************************************************************/
+#define SRC_SLICE(x) (SRC_BASE + 0x400 * (x))
+#define SRC_AUTHEN_CTRL U(0x4)
+#define SRC_LPM_SETTING0 U(0x10)
+#define SRC_LPM_SETTING1 U(0x14)
+#define SRC_LPM_SETTING2 U(0x18)
+#define SRC_SLICE_SW_CTRL U(0x20)
+
+#define SRC_MEM_CTRL U(0x4)
+#define MEM_LP_EN BIT(2)
+#define MEM_LP_RETN BIT(1)
+
+enum mix_mem_mode {
+ MEM_OFF,
+ MEM_RETN,
+};
+
+enum src_mix_mem_slice {
+ SRC_GLOBAL,
+
+ /* MIX slice */
+ SRC_SENTINEL,
+ SRC_AON,
+ SRC_WKUP,
+ SRC_DDR,
+ SRC_DPHY,
+ SRC_ML,
+ SRC_NIC,
+ SRC_HSIO,
+ SRC_MEDIA,
+ SRC_M33P,
+ SRC_A55C0,
+ SRC_A55C1,
+ SRC_A55P,
+
+ /* MEM slice */
+ SRC_AON_MEM,
+ SRC_WKUP_MEM,
+ SRC_DDR_MEM,
+ SRC_DPHY_MEM,
+ SRC_ML_MEM,
+ SRC_NIC_MEM,
+ SRC_NIC_OCRAM,
+ SRC_HSIO_MEM,
+ SRC_MEDIA_MEM,
+ SRC_A55P0_MEM,
+ SRC_A55P1_MEM,
+ SRC_A55_SCU_MEM,
+ SRC_A55_L3_MEM,
+};
+
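+/* AUTHEN_CTRL layout (as used below): domain white list in bits [31:16], LPM mode enable in bit 2 */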
+static inline void src_authen_config(unsigned int mix, unsigned int wlist,
+ unsigned int lpm_en)
+{
+ mmio_write_32(SRC_SLICE(mix) + SRC_AUTHEN_CTRL, (wlist << 16) | (lpm_en << 2));
+}
+
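+/* Pick the LPM_SETTINGn register for the given domain (8 domains per register) and update its 4-bit field */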
+static inline void src_mix_set_lpm(unsigned int mix, unsigned int did, unsigned int lpm_mode)
+{
+ mmio_clrsetbits_32(SRC_SLICE(mix) + SRC_LPM_SETTING1 + (did / 8) * 0x4,
+ LPM_SETTING(did, 0x7), LPM_SETTING(did, lpm_mode));
+}
+
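+/* Allow the slice memory to enter low power; keep its contents when retention is requested */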
+static inline void src_mem_lpm_en(unsigned int mix, bool retn)
+{
+ mmio_setbits_32(SRC_SLICE(mix) + SRC_MEM_CTRL, MEM_LP_EN | (retn ? MEM_LP_RETN : 0));
+}
+
+static inline void src_mem_lpm_dis(unsigned int mix)
+{
+ mmio_clrbits_32(SRC_SLICE(mix) + SRC_MEM_CTRL, MEM_LP_EN | MEM_LP_RETN);
+}
+
+/*******************************************************************************
+ * BLK_CTRL_S definitions & declarations
+ ******************************************************************************/
+#define HW_LP_HANDHSK U(0x110)
+#define HW_LP_HANDHSK2 U(0x114)
+#define CA55_CPUWAIT U(0x118)
+#define CA55_RVBADDR0_L U(0x11c)
+#define CA55_RVBADDR0_H U(0x120)
+
+/*******************************************************************************
+ * Other definitions & declarations
+ ******************************************************************************/
+void pwr_sys_init(void);
+
+#endif /* PWR_CTRL_H */
+
diff --git a/plat/imx/imx93/plat_topology.c b/plat/imx/imx93/plat_topology.c
new file mode 100644
index 0000000000..739e2b9f35
--- /dev/null
+++ b/plat/imx/imx93/plat_topology.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <plat/common/platform.h>
+
+const unsigned char imx_power_domain_tree_desc[] = {
+ PWR_DOMAIN_AT_MAX_LVL,
+ PLATFORM_CLUSTER_COUNT,
+ PLATFORM_CLUSTER0_CORE_COUNT,
+};
+
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return imx_power_domain_tree_desc;
+}
+
+/*
+ * Only one cluster is planned for the i.MX9 family, so there is
+ * no need to consider the cluster ID.
+ */
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)) {
+ return -1;
+ }
+
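+ /* Cortex-A55 uses the MT MPIDR format, so the core number is held in Aff1 */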
+ cpu_id = MPIDR_AFFLVL1_VAL(mpidr);
+
+ return cpu_id;
+}
diff --git a/plat/imx/imx93/platform.mk b/plat/imx/imx93/platform.mk
new file mode 100644
index 0000000000..ed7e81f970
--- /dev/null
+++ b/plat/imx/imx93/platform.mk
@@ -0,0 +1,46 @@
+#
+# Copyright 2022-2023 NXP
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/imx/common/include \
+ -Iplat/imx/imx93/include
+
+# Translation tables library
+include lib/xlat_tables_v2/xlat_tables.mk
+
+GICV3_SUPPORT_GIC600 := 1
+
+# Include GICv3 driver files
+include drivers/arm/gic/v3/gicv3.mk
+
+IMX_GIC_SOURCES := ${GICV3_SOURCES} \
+ plat/common/plat_gicv3.c \
+ plat/common/plat_psci_common.c \
+ plat/imx/common/plat_imx8_gic.c
+
+BL31_SOURCES += plat/common/aarch64/crash_console_helpers.S \
+ plat/imx/imx93/aarch64/plat_helpers.S \
+ plat/imx/imx93/plat_topology.c \
+ plat/imx/common/lpuart_console.S \
+ plat/imx/imx93/trdc.c \
+ plat/imx/imx93/pwr_ctrl.c \
+ plat/imx/imx93/imx93_bl31_setup.c \
+ plat/imx/imx93/imx93_psci.c \
+ lib/cpus/aarch64/cortex_a55.S \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/nxp/trdc/imx_trdc.c \
+ ${IMX_GIC_SOURCES} \
+ ${XLAT_TABLES_LIB_SRCS}
+
+RESET_TO_BL31 := 1
+HW_ASSISTED_COHERENCY := 1
+USE_COHERENT_MEM := 0
+PROGRAMMABLE_RESET_ADDRESS := 1
+COLD_BOOT_SINGLE_CPU := 1
+
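+# Default BL32 (e.g. OP-TEE) load region in DRAM; both values can be overridden on the build command line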
+BL32_BASE ?= 0x96000000
+BL32_SIZE ?= 0x02000000
+$(eval $(call add_define,BL32_BASE))
+$(eval $(call add_define,BL32_SIZE))
diff --git a/plat/imx/imx93/pwr_ctrl.c b/plat/imx/imx93/pwr_ctrl.c
new file mode 100644
index 0000000000..624c60532c
--- /dev/null
+++ b/plat/imx/imx93/pwr_ctrl.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+
+#include <platform_def.h>
+#include <pwr_ctrl.h>
+
+/* Do the necessary GPC, SRC, BLK_CTRL_S init */
+void pwr_sys_init(void)
+{
+ unsigned int cpu;
+
+ /*
+ * Assign the A55 cluster to domain 3, M33 to domain 2, and A55 core0/core1
+ * to domains 0/1. Domains 0/1 are only used to trigger LPM for the cores
+ * themselves. The A55 cluster and M33 domain assignments must be aligned
+ * with the TRDC DIDs.
+ */
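+ /* 0x3102: one 4-bit field per CMC slice (assumed order M33, A55C0, A55C1, A55_PLAT from the LSB) */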
+ gpc_assign_domains(0x3102);
+
+ /* CA55 core0/1 config */
+ for (cpu = CPU_A55C0; cpu <= CPU_A55_PLAT; cpu++) {
+ /* clear the cpu sleep hold */
+ gpc_clear_cpu_sleep_hold(cpu);
+ /* use gic wakeup source by default */
+ gpc_select_wakeup_gic(cpu);
+ /*
+ * Ignore A55 core0/1's LPM trigger for system sleep.
+ * Normally, on the A55 side, only the A55 cluster (plat)
+ * domain is used to trigger the system-wide low
+ * power mode transition.
+ */
+ if (cpu != CPU_A55_PLAT) {
+ gpc_force_cpu_suspend(cpu);
+ }
+ }
+
+ /* boot core (A55C0) */
+ src_mem_lpm_en(SRC_A55P0_MEM, MEM_OFF);
+ /* The A55 core only needs to be powered on in RUN mode */
+ src_mix_set_lpm(SRC_A55C0, 0x0, CM_MODE_WAIT);
+ /* whitelist: 0x1 for domain 0 only */
+ src_authen_config(SRC_A55C0, 0x1, 0x1);
+
+ /* A55 cluster */
+ gpc_select_wakeup_gic(CPU_A55_PLAT);
+ gpc_clear_cpu_sleep_hold(CPU_A55_PLAT);
+
+ /* SCU MEM must be OFF when A55 PLAT OFF */
+ src_mem_lpm_en(SRC_A55_SCU_MEM, MEM_OFF);
+ /* L3 memory in retention by default */
+ src_mem_lpm_en(SRC_A55_L3_MEM, MEM_RETN);
+
+ src_mix_set_lpm(SRC_A55P, 0x3, 0x1);
+ /* whitelist: 0x8 for domain 3 only */
+ src_authen_config(SRC_A55P, 0x8, 0x1);
+
+ /* enable the HW LP handshake between S401 & A55 cluster */
+ mmio_setbits_32(BLK_CTRL_S_BASE + HW_LP_HANDHSK, BIT(5));
+}
+
diff --git a/plat/imx/imx93/trdc.c b/plat/imx/imx93/trdc.c
new file mode 100644
index 0000000000..0d09aa6890
--- /dev/null
+++ b/plat/imx/imx93/trdc.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <lib/mmio.h>
+
+#include "trdc_config.h"
+
+struct trdc_mgr_info trdc_mgr_blks[] = {
+ { TRDC_A_BASE, 0, 0, 39, 40 },
+ { TRDC_W_BASE, 0, 0, 70, 71 },
+ { TRDC_M_BASE, 1, 0, 1, 2 },
+ { TRDC_N_BASE, 0, 1, 1, 2 },
+};
+
+unsigned int trdc_mgr_num = ARRAY_SIZE(trdc_mgr_blks);
+
+struct trdc_config_info trdc_cfg_info[] = {
+ { TRDC_A_BASE,
+ trdc_a_mbc_glbac, ARRAY_SIZE(trdc_a_mbc_glbac),
+ trdc_a_mbc, ARRAY_SIZE(trdc_a_mbc),
+ trdc_a_mrc_glbac, ARRAY_SIZE(trdc_a_mrc_glbac),
+ trdc_a_mrc, ARRAY_SIZE(trdc_a_mrc)
+ }, /* TRDC_A */
+ { TRDC_W_BASE,
+ trdc_w_mbc_glbac, ARRAY_SIZE(trdc_w_mbc_glbac),
+ trdc_w_mbc, ARRAY_SIZE(trdc_w_mbc),
+ trdc_w_mrc_glbac, ARRAY_SIZE(trdc_w_mrc_glbac),
+ trdc_w_mrc, ARRAY_SIZE(trdc_w_mrc)
+ }, /* TRDC_W */
+ { TRDC_N_BASE,
+ trdc_n_mbc_glbac, ARRAY_SIZE(trdc_n_mbc_glbac),
+ trdc_n_mbc, ARRAY_SIZE(trdc_n_mbc),
+ trdc_n_mrc_glbac, ARRAY_SIZE(trdc_n_mrc_glbac),
+ trdc_n_mrc, ARRAY_SIZE(trdc_n_mrc)
+ }, /* TRDC_N */
+};
+
+void trdc_config(void)
+{
+ unsigned int i;
+
+ /* Set MTR to DID1 */
+ trdc_mda_set_noncpu(TRDC_A_BASE, 4, false, 0x2, 0x2, 0x1);
+
+ /* Set M33 to DID2 */
+ trdc_mda_set_cpu(TRDC_A_BASE, 1, 0, 0x2, 0x0, 0x2, 0x0, 0x0, 0x0);
+
+ /* Configure the access permission for TRDC MGR and MC slots */
+ for (i = 0U; i < ARRAY_SIZE(trdc_mgr_blks); i++) {
+ trdc_mgr_mbc_setup(&trdc_mgr_blks[i]);
+ }
+
+ /* Configure TRDC user settings from config table */
+ for (i = 0U; i < ARRAY_SIZE(trdc_cfg_info); i++) {
+ trdc_setup(&trdc_cfg_info[i]);
+ }
+
+ NOTICE("TRDC init done\n");
+}
diff --git a/plat/imx/imx93/trdc_config.h b/plat/imx/imx93/trdc_config.h
new file mode 100644
index 0000000000..c623a1996b
--- /dev/null
+++ b/plat/imx/imx93/trdc_config.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2022-2023 NXP
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/nxp/trdc/imx_trdc.h>
+
+#define TRDC_A_BASE U(0x44270000)
+#define TRDC_W_BASE U(0x42460000)
+#define TRDC_M_BASE U(0x42810000)
+#define TRDC_N_BASE U(0x49010000)
+
+/* GLBAC7 is used for TRDC only, any setting to GLBAC7 will be ignored */
+
+/* aonmix */
+struct trdc_glbac_config trdc_a_mbc_glbac[] = {
+ /* MBC0 */
+ { 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ /* MBC1 */
+ { 1, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ { 1, 1, SP(RW) | SU(R) | NP(RW) | NU(R) },
+ { 1, 2, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+struct trdc_mbc_config trdc_a_mbc[] = {
+ { 0, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for S401 DID0 */
+ { 0, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for S401 DID0 */
+ { 0, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO1 for S401 DID0 */
+ { 1, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC1 CM33 code TCM for S401 DID0 */
+ { 1, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC1 CM33 system TCM for S401 DID0 */
+
+ { 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for MTR DID1 */
+ { 0, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for MTR DID1 */
+
+ { 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS1 for M33 DID2 */
+ { 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 Sentinel_SOC_In for M33 DID2 */
+ { 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO1 for M33 DID2 */
+ { 1, 2, 0, MBC_BLK_ALL, 2, true }, /* MBC1 CM33 code TCM for M33 DID2 */
+ { 1, 2, 1, MBC_BLK_ALL, 2, true }, /* MBC1 CM33 system TCM for M33 DID2 */
+
+ { 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS1 for A55 DID3 */
+ { 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 Sentinel_SOC_In for A55 DID3 */
+ { 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO1 for A55 DID3 */
+ { 1, 3, 0, MBC_BLK_ALL, 1, false }, /* MBC1 CM33 code TCM for A55 DID3 */
+ { 1, 3, 1, MBC_BLK_ALL, 1, false }, /* MBC1 CM33 system TCM for A55 DID3 */
+ { 1, 10, 1, MBC_BLK_ALL, 2, false }, /* MBC1 CM33 system TCM for SoC masters DID10 */
+
+ { 0, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS1 for eDMA DID7 */
+};
+
+struct trdc_glbac_config trdc_a_mrc_glbac[] = {
+ { 0, 0, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+ { 0, 1, SP(R) | SU(0) | NP(R) | NU(0) },
+};
+
+struct trdc_mrc_config trdc_a_mrc[] = {
+ { 0, 2, 0, 0x00000000, 0x00040000, 0, true }, /* MRC0 M33 ROM for M33 DID2 */
+ { 0, 3, 0, 0x00100000, 0x00040000, 1, true }, /* MRC0 M33 ROM for A55 DID3 */
+};
+
+/* wakeupmix */
+struct trdc_glbac_config trdc_w_mbc_glbac[] = {
+ /* MBC0 */
+ { 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ /* MBC1 */
+ { 1, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+};
+
+struct trdc_mbc_config trdc_w_mbc[] = {
+ { 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS2 for MTR DID1 */
+ { 1, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC1 AIPS3 for MTR DID1 */
+
+ { 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS2 for M33 DID2 */
+ { 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO2_In for M33 DID2 */
+ { 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 GPIO3 for M33 DID2 */
+ { 0, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC0 DAP for M33 DID2 */
+ { 1, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC1 AIPS3 for M33 DID2 */
+ { 1, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC1 AHB_ISPAP for M33 DID2 */
+ { 1, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC1 NIC_MAIN_GPV for M33 DID2 */
+ { 1, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC1 GPIO4 for M33 DID2 */
+
+ { 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS2 for A55 DID3 */
+ { 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO2_In for A55 DID3 */
+ { 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 GPIO3 for A55 DID3 */
+ { 0, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC0 DAP for A55 DID3 */
+ { 1, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC1 AIPS3 for A55 DID3 */
+ { 1, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC1 AHB_ISPAP for A55 DID3 */
+ { 1, 3, 2, MBC_BLK_ALL, 0, true }, /* MBC1 NIC_MAIN_GPV for A55 DID3 */
+ { 1, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC1 GPIO4 for A55 DID3 */
+
+ { 0, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS2 for eDMA DID7 */
+ { 1, 7, 0, MBC_BLK_ALL, 0, false }, /* MBC1 AIPS3 for eDMA DID7 */
+};
+
+struct trdc_glbac_config trdc_w_mrc_glbac[] = {
+ /* MRC0 */
+ { 0, 0, SP(RX) | SU(RX) | NP(RX) | NU(RX) },
+ /* MRC1 */
+ { 1, 0, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+struct trdc_mrc_config trdc_w_mrc[] = {
+ { 0, 3, 0, 0x00000000, 0x00040000, 0, false }, /* MRC0 A55 ROM for A55 DID3 */
+ { 1, 2, 0, 0x28000000, 0x08000000, 0, true }, /* MRC1 FLEXSPI1 for M33 DID2 */
+ { 1, 3, 0, 0x28000000, 0x08000000, 0, false }, /* MRC1 FLEXSPI1 for A55 DID3 */
+};
+
+/* nicmix */
+struct trdc_glbac_config trdc_n_mbc_glbac[] = {
+ /* MBC0 */
+ { 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ /* MBC1 */
+ { 1, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ /* MBC2 */
+ { 2, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ { 2, 1, SP(R) | SU(R) | NP(R) | NU(R) },
+ /* MBC3 */
+ { 3, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ { 3, 1, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+struct trdc_mbc_config trdc_n_mbc[] = {
+ { 0, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for S401 DID0 */
+ { 0, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for S401 DID0 */
+ { 0, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for S401 DID0 */
+ { 0, 0, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for S401 DID0 */
+ { 1, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for S401 DID0 */
+ { 1, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for S401 DID0 */
+ { 1, 0, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for S401 DID0 */
+ { 1, 0, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for S401 DID0 */
+ { 2, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC2 GIC for S401 DID0 */
+ { 2, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC2 GIC for S401 DID0 */
+ { 3, 0, 0, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for S401 DID0 */
+ { 3, 0, 1, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for S401 DID0 */
+
+ { 0, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for MTR DID1 */
+ { 0, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for MTR DID1 */
+ { 0, 1, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for MTR DID1 */
+ { 0, 1, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for MTR DID1 */
+ { 1, 1, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for MTR DID1 */
+ { 1, 1, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for MTR DID1 */
+ { 1, 1, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for MTR DID1 */
+ { 1, 1, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for MTR DID1 */
+
+ { 0, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC0 DDRCFG for M33 DID2 */
+ { 0, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC0 AIPS4 for M33 DID2 */
+ { 0, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC0 MEDIAMIX for M33 DID2 */
+ { 0, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC0 HSIOMIX for M33 DID2 */
+ { 1, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for M33 DID2 */
+ { 1, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC1 MTR_DCA, TCU, TROUT for M33 DID2 */
+ { 1, 2, 2, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for M33 DID2 */
+ { 1, 2, 3, MBC_BLK_ALL, 0, true }, /* MBC1 MLMIX for M33 DID2 */
+ { 2, 2, 0, MBC_BLK_ALL, 1, true }, /* MBC2 GIC for M33 DID2 */
+ { 2, 2, 1, MBC_BLK_ALL, 1, true }, /* MBC2 GIC for M33 DID2 */
+ { 3, 2, 0, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for M33 DID2 */
+ { 3, 2, 1, MBC_BLK_ALL, 0, true }, /* MBC3 OCRAM for M33 DID2 */
+
+ { 0, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC0 DDRCFG for A55 DID3 */
+ { 0, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS4 for A55 DID3 */
+ { 0, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC0 MEDIAMIX for A55 DID3 */
+ { 0, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC0 HSIOMIX for A55 DID3 */
+ { 1, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC1 MTR_DCA, TCU, TROUT for A55 DID3 */
+ { 1, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC1 MTR_DCA, TCU, TROUT for A55 DID3 */
+ { 1, 3, 2, MBC_BLK_ALL, 0, false }, /* MBC1 MLMIX for A55 DID3 */
+ { 1, 3, 3, MBC_BLK_ALL, 0, false }, /* MBC1 MLMIX for A55 DID3 */
+ { 2, 3, 0, MBC_BLK_ALL, 0, false }, /* MBC2 GIC for A55 DID3 */
+ { 2, 3, 1, MBC_BLK_ALL, 0, false }, /* MBC2 GIC for A55 DID3 */
+ { 3, 3, 0, MBC_BLK_ALL, 1, true }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, MBC_BLK_ALL, 1, true }, /* MBC3 OCRAM for A55 DID3 */
+
+ { 3, 3, 0, 0, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 0, 1, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 0, 2, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 0, 3, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 0, 4, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 0, 5, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 0, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 1, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 2, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 3, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 4, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+ { 3, 3, 1, 5, 0, false }, /* MBC3 OCRAM for A55 DID3 */
+
+ { 0, 7, 1, MBC_BLK_ALL, 0, false }, /* MBC0 AIPS4 for eDMA DID7 */
+ { 0, 7, 2, MBC_BLK_ALL, 0, false }, /* MBC0 MEDIAMIX for eDMA DID7 */
+ { 0, 7, 3, MBC_BLK_ALL, 0, false }, /* MBC0 HSIOMIX for eDMA DID7 */
+
+ { 3, 10, 0, MBC_BLK_ALL, 0, false }, /* MBC3 OCRAM for DID10 */
+ { 3, 10, 1, MBC_BLK_ALL, 0, false }, /* MBC3 OCRAM for DID10 */
+};
+
+struct trdc_glbac_config trdc_n_mrc_glbac[] = {
+ { 0, 0, SP(RW) | SU(RW) | NP(RW) | NU(RW) },
+ { 0, 1, SP(RWX) | SU(RWX) | NP(RWX) | NU(RWX) },
+};
+
+#if defined(SPD_opteed)
+#define TEE_SHM_SIZE 0x200000
+
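+/*
+ * With OP-TEE enabled, the 2GB DRAM window is split into three MRC regions:
+ * the NS DRAM below BL32, the secure BL32 region (excluding the shared
+ * memory at its top), and the remaining NS DRAM starting from that SHM.
+ */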
+#define DRAM_MEM_0_START (0x80000000)
+#define DRAM_MEM_0_SIZE (BL32_BASE - 0x80000000)
+
+#define DRAM_MEM_1_START (BL32_BASE)
+#define DRAM_MEM_1_SIZE (BL32_SIZE - TEE_SHM_SIZE)
+
+#define DRAM_MEM_2_START (DRAM_MEM_1_START + DRAM_MEM_1_SIZE)
+#define DRAM_MEM_2_SIZE (0x80000000 - DRAM_MEM_1_SIZE - DRAM_MEM_0_SIZE)
+
+struct trdc_mrc_config trdc_n_mrc[] = {
+ { 0, 0, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for S400 DID0 */
+ { 0, 1, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for MTR DID1 */
+ { 0, 2, 0, 0x80000000, 0x80000000, 0, true }, /* MRC0 DRAM for M33 DID2 */
+ { 0, 8, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for Coresight, Testport DID8 */
+ { 0, 9, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for DAP DID9 */
+
+ { 0, 3, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 1, false }, /* MRC0 DRAM for A55 DID3 */
+ { 0, 5, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+ { 0, 6, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+ { 0, 7, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+ { 0, 10, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+ { 0, 11, 0, DRAM_MEM_0_START, DRAM_MEM_0_SIZE, 0, false }, /* MRC0 DRAM for USB DID11 */
+
+ /* OPTEE memory for secure access only. */
+ { 0, 3, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 1, true }, /* MRC0 DRAM for A55 DID3 */
+ { 0, 5, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USDHC1 DID5 */
+ { 0, 6, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USDHC2 DID6 */
+ { 0, 7, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for eDMA DID7 */
+ { 0, 10, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for SoC masters DID10 */
+ { 0, 11, 1, DRAM_MEM_1_START, DRAM_MEM_1_SIZE, 0, true }, /* MRC0 DRAM for USB DID11 */
+
+ { 0, 3, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 1, false }, /* MRC0 DRAM for A55 DID3 */
+ { 0, 5, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+ { 0, 6, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+ { 0, 7, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+ { 0, 10, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+ { 0, 11, 2, DRAM_MEM_2_START, DRAM_MEM_2_SIZE, 0, false }, /* MRC0 DRAM for USB DID11 */
+
+};
+#else
+struct trdc_mrc_config trdc_n_mrc[] = {
+ { 0, 0, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for S400 DID0 */
+ { 0, 1, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for MTR DID1 */
+ { 0, 2, 0, 0x80000000, 0x80000000, 0, true }, /* MRC0 DRAM for M33 DID2 */
+ { 0, 3, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for A55 DID3 */
+ { 0, 5, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USDHC1 DID5 */
+ { 0, 6, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USDHC2 DID6 */
+ { 0, 7, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for eDMA DID7 */
+ { 0, 8, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for Coresight, Testport DID8 */
+ { 0, 9, 0, 0x80000000, 0x80000000, 1, false }, /* MRC0 DRAM for DAP DID9 */
+ { 0, 10, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for SoC masters DID10 */
+ { 0, 11, 0, 0x80000000, 0x80000000, 0, false }, /* MRC0 DRAM for USB DID11 */
+};
+#endif