feat(el3-spmc): add support to map S-EL0 SP memory regions
Add support to parse the SP manifest for memory regions, create the
xlat tables for them and then program the translation table base into
TTBR0_EL1.
The SP manifest contains information about the memory regions needed by
the SP. These regions need to be mapped by the SPMC, as an SP running at
S-EL0 does not have the privilege to map them itself.
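For reference, a memory-regions node in an SP manifest could look like
the following (node name and property values are illustrative only):

    memory-regions {
            compatible = "arm,ffa-manifest-memory-regions";

            shared-buf {
                    description = "shared-buf";
                    base-address = <0x0 0x88000000>;
                    pages-count = <0x4>;
                    /* bit[0] R, bit[1] W, bit[2] X, bit[3] NS */
                    attributes = <0x3>; /* read-write, secure */
            };
    };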
Signed-off-by: Sayanta Pattanayak <sayanta.pattanayak@arm.com>
Signed-off-by: Aditya Angadi <aditya.angadi@arm.com>
Signed-off-by: Achin Gupta <achin.gupta@arm.com>
Signed-off-by: Nishant Sharma <nishant.sharma@arm.com>
Change-Id: I0cad36e5c43f8a68c94887ff2bd798933a26be27
diff --git a/services/std_svc/spm/el3_spmc/spmc.h b/services/std_svc/spm/el3_spmc/spmc.h
index e4120de..c8515e5 100644
--- a/services/std_svc/spm/el3_spmc/spmc.h
+++ b/services/std_svc/spm/el3_spmc/spmc.h
@@ -229,7 +229,8 @@
entry_point_info_t *ep_info);
void spmc_el0_sp_spsr_setup(entry_point_info_t *ep_info);
void spmc_el0_sp_setup(struct secure_partition_desc *sp,
- int32_t boot_info_reg);
+ int32_t boot_info_reg,
+ void *sp_manifest);
/*
* Helper function to perform a synchronous entry into a SP.
diff --git a/services/std_svc/spm/el3_spmc/spmc_main.c b/services/std_svc/spm/el3_spmc/spmc_main.c
index 44d0ecc..d661e8a 100644
--- a/services/std_svc/spm/el3_spmc/spmc_main.c
+++ b/services/std_svc/spm/el3_spmc/spmc_main.c
@@ -1988,7 +1988,7 @@
* context management routine.
*/
if (sp->runtime_el == S_EL0) {
- spmc_el0_sp_setup(sp, boot_info_reg);
+ spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
}
#endif /* SPMC_AT_EL3_SEL0_SP */
return 0;
diff --git a/services/std_svc/spm/el3_spmc/spmc_setup.c b/services/std_svc/spm/el3_spmc/spmc_setup.c
index 4299ced..61ee0c6 100644
--- a/services/std_svc/spm/el3_spmc/spmc_setup.c
+++ b/services/std_svc/spm/el3_spmc/spmc_setup.c
@@ -37,6 +37,12 @@
* physical CPU the SP runs on.
*/
#define SEL0_SP_EC_INDEX 0
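+
+/* Memory attribute flags encoded in the SP manifest "attributes" property. */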
+#define SP_MEM_READ 0x1
+#define SP_MEM_WRITE 0x2
+#define SP_MEM_EXECUTE 0x4
+#define SP_MEM_NON_SECURE 0x8
+#define SP_MEM_READ_ONLY SP_MEM_READ
+#define SP_MEM_READ_WRITE (SP_MEM_READ | SP_MEM_WRITE)
/*
* This function creates an initialization descriptor in the memory reserved
@@ -170,15 +176,206 @@
ep_info->spsr = SPSR_64(MODE_EL0, MODE_SP_EL0, DISABLE_ALL_EXCEPTIONS);
}
+static void read_optional_string(void *manifest, int32_t offset,
+ const char *property, char *out, size_t len)
+{
+ const fdt32_t *prop;
+ int lenp;
+
+ prop = fdt_getprop(manifest, offset, property, &lenp);
+ if (prop == NULL) {
+ out[0] = '\0';
+ } else {
+ memcpy(out, prop, MIN(lenp, (int)len));
+ /* Ensure the output string is NUL-terminated even if truncated. */
+ out[len - 1] = '\0';
+ }
+}
+
+/*******************************************************************************
+ * This function parses the Secure Partition manifest to fetch the partition's
+ * memory region details. For each region it reads the base address, size and
+ * memory attributes and then adds the region to the secure partition's
+ * translation context.
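+ *
+ * An executable region is mapped as code (read-only and executable); other
+ * regions are mapped as read-only or read-write data according to the
+ * read/write bits, and as secure or non-secure memory per the NS bit.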
+ ******************************************************************************/
+static void populate_sp_mem_regions(struct secure_partition_desc *sp,
+ void *sp_manifest,
+ int node)
+{
+ uintptr_t base_address;
+ uint32_t mem_attr, mem_region, size;
+ struct mmap_region sp_mem_regions;
+ int32_t offset, ret;
+ char description[10];
+ const char *property;
+
+ if (fdt_node_check_compatible(sp_manifest, node,
+ "arm,ffa-manifest-memory-regions") != 0) {
+ WARN("Incompatible memory region node in manifest\n");
+ return;
+ }
+
+ INFO("Mapping SP's memory regions\n");
+
+ for (offset = fdt_first_subnode(sp_manifest, node), mem_region = 0;
+ offset >= 0;
+ offset = fdt_next_subnode(sp_manifest, offset), mem_region++) {
+ read_optional_string(sp_manifest, offset, "description",
+ description, sizeof(description));
+
+ INFO("Mapping: region: %d, %s\n", mem_region, description);
+
+ property = "base-address";
+ ret = fdt_read_uint64(sp_manifest, offset, property,
+ &base_address);
+ if (ret < 0) {
+ WARN("Missing:%s for %s.\n", property, description);
+ continue;
+ }
+
+ property = "pages-count";
+ ret = fdt_read_uint32(sp_manifest, offset, property, &size);
+ if (ret < 0) {
+ WARN("Missing: %s for %s.\n", property, description);
+ continue;
+ }
+ size *= PAGE_SIZE;
+
+ property = "attributes";
+ ret = fdt_read_uint32(sp_manifest, offset, property, &mem_attr);
+ if (ret < 0) {
+ WARN("Missing: %s for %s.\n", property, description);
+ continue;
+ }
+
+ sp_mem_regions.attr = MT_USER;
+ if ((mem_attr & SP_MEM_EXECUTE) == SP_MEM_EXECUTE) {
+ sp_mem_regions.attr |= MT_CODE;
+ } else if ((mem_attr & SP_MEM_READ_ONLY) == SP_MEM_READ_ONLY) {
+ sp_mem_regions.attr |= MT_RO_DATA;
+ } else if ((mem_attr & SP_MEM_READ_WRITE) ==
+ SP_MEM_READ_WRITE) {
+ sp_mem_regions.attr |= MT_RW_DATA;
+ }
+
+ if ((mem_attr & SP_MEM_NON_SECURE) == SP_MEM_NON_SECURE) {
+ sp_mem_regions.attr |= MT_NS;
+ } else {
+ sp_mem_regions.attr |= MT_SECURE;
+ }
+
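+ /* Regions are identity mapped (VA == PA) at page granularity. */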
+ sp_mem_regions.base_pa = base_address;
+ sp_mem_regions.base_va = base_address;
+ sp_mem_regions.size = size;
+ sp_mem_regions.granularity = XLAT_BLOCK_SIZE(3);
+ mmap_add_region_ctx(sp->xlat_ctx_handle, &sp_mem_regions);
+ }
+}
+
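+/*
+ * Set up the S-EL0 translation regime of the SP: initialise its xlat tables
+ * and store the resulting MAIR/TCR/TTBR0 values in the SP's saved EL1 system
+ * register context, so that they take effect when that context is restored on
+ * entry to the SP.
+ */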
+static void spmc_el0_sp_setup_mmu(struct secure_partition_desc *sp,
+ cpu_context_t *ctx)
+{
+ xlat_ctx_t *xlat_ctx;
+ uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
+ xlat_ctx = sp->xlat_ctx_handle;
+ init_xlat_tables_ctx(xlat_ctx);
+ setup_mmu_cfg((uint64_t *)&mmu_cfg_params, 0, xlat_ctx->base_table,
+ xlat_ctx->pa_max_address, xlat_ctx->va_max_address,
+ EL1_EL0_REGIME);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_MAIR_EL1,
+ mmu_cfg_params[MMU_CFG_MAIR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TCR_EL1,
+ mmu_cfg_params[MMU_CFG_TCR]);
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_TTBR0_EL1,
+ mmu_cfg_params[MMU_CFG_TTBR0]);
+}
+
+static void spmc_el0_sp_setup_sctlr_el1(cpu_context_t *ctx)
+{
+ u_register_t sctlr_el1;
+
+ /* Setup SCTLR_EL1 */
+ sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
+
+ sctlr_el1 |=
+ /*SCTLR_EL1_RES1 |*/
+ /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
+ SCTLR_UCI_BIT |
+ /* RW regions at xlat regime EL1&0 are forced to be XN. */
+ SCTLR_WXN_BIT |
+ /* Don't trap to EL1 execution of WFI or WFE at EL0. */
+ SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
+ /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
+ SCTLR_UCT_BIT |
+ /* Don't trap to EL1 execution of DZ ZVA at EL0. */
+ SCTLR_DZE_BIT |
+ /* Enable SP Alignment check for EL0 */
+ SCTLR_SA0_BIT |
+ /* Don't change PSTATE.PAN on taking an exception to EL1 */
+ SCTLR_SPAN_BIT |
+ /* Allow cacheable data and instr. accesses to normal memory. */
+ SCTLR_C_BIT | SCTLR_I_BIT |
+ /* Enable MMU. */
+ SCTLR_M_BIT;
+
+ sctlr_el1 &= ~(
+ /* Explicit data accesses at EL0 are little-endian. */
+ SCTLR_E0E_BIT |
+ /*
+ * Alignment fault checking disabled when at EL1 and EL0 as
+ * the UEFI spec permits unaligned accesses.
+ */
+ SCTLR_A_BIT |
+ /* Accesses to DAIF from EL0 are trapped to EL1. */
+ SCTLR_UMA_BIT
+ );
+
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
+}
+
+static void spmc_el0_sp_setup_system_registers(struct secure_partition_desc *sp,
+ cpu_context_t *ctx)
+{
+ spmc_el0_sp_setup_mmu(sp, ctx);
+
+ spmc_el0_sp_setup_sctlr_el1(ctx);
+
+ /* Setup other system registers. */
+
+ /* Shim Exception Vector Base Address */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
+ SPM_SHIM_EXCEPTIONS_PTR);
+#if NS_TIMER_SWITCH
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
+ EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
+#endif
+
+ /*
+ * FPEN: Allow the Secure Partition to access FP/SIMD registers.
+ * Note that SPM will not do any saving/restoring of these registers on
+ * behalf of the SP. This falls under the SP's responsibility.
+ * TTA: Enable access to trace registers.
+ * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+ */
+ write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
+ CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+}
+
/* Setup context of an EL0 Secure Partition. */
void spmc_el0_sp_setup(struct secure_partition_desc *sp,
- int32_t boot_info_reg)
+ int32_t boot_info_reg,
+ void *sp_manifest)
{
mmap_region_t sel1_exception_vectors =
MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
SPM_SHIM_EXCEPTIONS_SIZE,
MT_CODE | MT_SECURE | MT_PRIVILEGED);
cpu_context_t *ctx;
+ int node;
+ int offset = 0;
ctx = &sp->ec[SEL0_SP_EC_INDEX].cpu_ctx;
@@ -201,64 +398,21 @@
mmap_add_region_ctx(sp->xlat_ctx_handle, &ffa_boot_info_region);
}
- /* Setup SCTLR_EL1 */
- u_register_t sctlr_el1 = read_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1);
-
- sctlr_el1 |=
- /*SCTLR_EL1_RES1 |*/
- /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
- SCTLR_UCI_BIT |
- /* RW regions at xlat regime EL1&0 are forced to be XN. */
- SCTLR_WXN_BIT |
- /* Don't trap to EL1 execution of WFI or WFE at EL0. */
- SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
- /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
- SCTLR_UCT_BIT |
- /* Don't trap to EL1 execution of DZ ZVA at EL0. */
- SCTLR_DZE_BIT |
- /* Enable SP Alignment check for EL0 */
- SCTLR_SA0_BIT |
- /* Don't change PSTATE.PAN on taking an exception to EL1 */
- SCTLR_SPAN_BIT |
- /* Allow cacheable data and instr. accesses to normal memory. */
- SCTLR_C_BIT | SCTLR_I_BIT |
- /* Enable MMU. */
- SCTLR_M_BIT
- ;
-
- sctlr_el1 &= ~(
- /* Explicit data accesses at EL0 are little-endian. */
- SCTLR_E0E_BIT |
- /*
- * Alignment fault checking disabled when at EL1 and EL0 as
- * the UEFI spec permits unaligned accesses.
- */
- SCTLR_A_BIT |
- /* Accesses to DAIF from EL0 are trapped to EL1. */
- SCTLR_UMA_BIT
- );
-
- write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);
-
- /* Setup other system registers. */
-
- /* Shim Exception Vector Base Address */
- write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_VBAR_EL1,
- SPM_SHIM_EXCEPTIONS_PTR);
-#if NS_TIMER_SWITCH
- write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CNTKCTL_EL1,
- EL0PTEN_BIT | EL0VTEN_BIT | EL0PCTEN_BIT | EL0VCTEN_BIT);
-#endif
-
/*
- * FPEN: Allow the Secure Partition to access FP/SIMD registers.
- * Note that SPM will not do any saving/restoring of these registers on
- * behalf of the SP. This falls under the SP's responsibility.
- * TTA: Enable access to trace registers.
- * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
+ * Parse the manifest for any memory regions that the SP wants to be
+ * mapped in its translation regime.
*/
- write_ctx_reg(get_el1_sysregs_ctx(ctx), CTX_CPACR_EL1,
- CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE));
+ node = fdt_subnode_offset_namelen(sp_manifest, offset,
+ "memory-regions",
+ sizeof("memory-regions") - 1);
+ if (node < 0) {
+ WARN("Not found memory-region configuration for SP.\n");
+ } else {
+ populate_sp_mem_regions(sp, sp_manifest, node);
+ }
+
+ spmc_el0_sp_setup_system_registers(sp, ctx);
}
#endif /* SPMC_AT_EL3_SEL0_SP */