author Achin Gupta <achin.gupta@arm.com> 2021-10-04 19:55:50 +0100
committer Marc Bonnici <marc.bonnici@arm.com> 2021-10-07 12:35:37 +0100
commit a482fca0f3e8f78713d27fa88c792383eefb5dfa (patch)
tree f8368621ff6dd4831f33a01f0d69a59fb747365a
parent 55e62c347891523c828d82073d172b10255e8c49 (diff)
download trusted-firmware-a-a482fca0f3e8f78713d27fa88c792383eefb5dfa.tar.gz
REVISIT: Tailor SPMC at EL3 to support a S-EL1 SP
This patch makes the following changes to the EL3 SPMC implementation with a view to supporting a single S-EL1 SP.

1. Tweaks initialisation of the SPMC such that its properties are hard coded in the SPMD instead of being read from a manifest. Since the SPMD and SPMC are part of the same binary image, the manifest does not make sense.

2. Tweaks the data structure used to represent the context of a S-EL0 or S-EL1 SP to cater for fields that are specific to the latter, e.g. multiple execution contexts, no need for a spin lock for the execution contexts, etc.

3. Reworks the various helper and FF-A ABI handler functions to reflect the changes to the basic data structures.

This patch should be revisited when support for a S-EL0 SP is added.

Signed-off-by: Achin Gupta <achin.gupta@arm.com>
Change-Id: I9f0115dfb626a2d04a8747295d67a24e89bdd5e7
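For reference, the ID allocation introduced in spmc.h below follows the convention that bit 15 of an FF-A endpoint ID distinguishes Secure world endpoints (the SPMC and SPs) from Normal world endpoints (the Hypervisor and VMs). The following minimal, standalone sketch only illustrates that convention; the is_secure_world_id() helper and main() are invented for demonstration and are not part of this patch.

/* Illustrative sketch of the FF-A ID split used by this patch (not patch content). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FFA_NWLD_ID_BASE	0x0
#define FFA_SWLD_ID_BASE	0x8000
#define FFA_SWLD_ID_MASK	0x8000

#define FFA_HYP_ID	FFA_NWLD_ID_BASE	/* Hypervisor or physical OS */
#define FFA_SPMC_ID	FFA_SWLD_ID_BASE	/* First Secure world ID is the SPMC */
#define FFA_SP_ID_BASE	(FFA_SPMC_ID + 1)	/* SP IDs follow the SPMC ID */

/* An endpoint belongs to the Secure world if bit 15 of its ID is set. */
static bool is_secure_world_id(uint16_t id)
{
	return (id & FFA_SWLD_ID_MASK) != 0U;
}

int main(void)
{
	printf("SPMC ID 0x%x is secure: %d\n", FFA_SPMC_ID, is_secure_world_id(FFA_SPMC_ID));
	printf("Hypervisor ID 0x%x is secure: %d\n", FFA_HYP_ID, is_secure_world_id(FFA_HYP_ID));
	printf("First SP ID: 0x%x\n", FFA_SP_ID_BASE);
	return 0;
}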
-rw-r--r-- plat/arm/board/fvp/fvp_def.h | 2
-rw-r--r-- services/std_svc/spm/spmc/spmc.h | 156
-rw-r--r-- services/std_svc/spm/spmc/spmc_main.c | 1411
-rw-r--r-- services/std_svc/spmd/spmd_main.c | 80
-rw-r--r-- services/std_svc/spmd/spmd_private.h | 3
5 files changed, 1026 insertions, 626 deletions
diff --git a/plat/arm/board/fvp/fvp_def.h b/plat/arm/board/fvp/fvp_def.h
index 013fc5edd4..28c4e0cd91 100644
--- a/plat/arm/board/fvp/fvp_def.h
+++ b/plat/arm/board/fvp/fvp_def.h
@@ -193,7 +193,7 @@
* nwld partitions.
*/
#if defined(SPMC_AT_EL3)
-#define NWLD_PARTITION_COUNT 1
+#define NS_PARTITION_COUNT 1
#endif
#endif /* FVP_DEF_H */
diff --git a/services/std_svc/spm/spmc/spmc.h b/services/std_svc/spm/spmc/spmc.h
index 3d93975a06..fc3d6f851f 100644
--- a/services/std_svc/spm/spmc/spmc.h
+++ b/services/std_svc/spm/spmc/spmc.h
@@ -12,14 +12,43 @@
#include "spm_common.h"
/*
- * 0x1 is used for StandAloneMM Secure Parition ID.
- * Same has been followed in Optee.
- * https://github.com/OP-TEE/optee_os/blob/49dbb9ef65643c4322cf3f848910fa880d1c02f6/core/arch/arm/kernel/stmm_sp.c#L65-L67
+ * Ranges of FF-A IDs for Normal world and Secure world components. The
+ * convention matches that used by other SPMCs i.e. Hafnium and OP-TEE.
*/
-#define STMM_SP_ID (0x1)
+#define FFA_NWLD_ID_BASE 0x0
+#define FFA_NWLD_ID_LIMIT 0x7FFF
+#define FFA_SWLD_ID_BASE 0x8000
+#define FFA_SWLD_ID_LIMIT 0xFFFF
+#define FFA_SWLD_ID_MASK 0x8000
+
+#define FFA_HYP_ID FFA_NWLD_ID_BASE /* Hypervisor or physical OS is assigned 0x0 as per SMCCC */
+#define FFA_SPMC_ID U(FFA_SWLD_ID_BASE) /* First ID is reserved for the SPMC */
+#define FFA_SP_ID_BASE (FFA_SPMC_ID + 1) /* SP IDs are allocated after the SPMC ID */
+#define INV_SP_ID 0x7FFF /* Align with Hafnium implementation */
+
+/*
+ * Runtime states of an execution context as per the FF-A v1.1 specification.
+ */
+enum runtime_states {
+ RT_STATE_WAITING,
+ RT_STATE_RUNNING,
+ RT_STATE_PREEMPTED,
+ RT_STATE_BLOCKED
+};
+
+/*
+ * Runtime model of an execution context as per the FF-A v1.1 specification. Its
+ * value is valid only if the execution context is not in the waiting state.
+ */
+enum runtime_model {
+ RT_MODEL_DIR_REQ,
+ RT_MODEL_RUN,
+ RT_MODEL_INIT,
+ RT_MODEL_INTR
+};
enum runtime_el {
- EL0,
+ EL0 = 0,
EL1,
EL2,
EL3
@@ -50,12 +79,29 @@ struct mailbox {
struct spinlock lock;
};
+/*
+ * Execution context members common to both S-EL0 and S-EL1 SPs. This is a bit
+ * like struct vcpu in a hypervisor.
+ */
+typedef struct sp_exec_ctx {
+ uint64_t c_rt_ctx;
+ cpu_context_t cpu_ctx;
+ enum runtime_states rt_state;
+ enum runtime_model rt_model;
+} sp_exec_ctx_t;
-typedef struct spmc_sp_context {
+/*
+ * Structure to describe the cumulative properties of S-EL0 and S-EL1 SPs.
+ */
+typedef struct secure_partition_desc {
/*
- * Secure partition context
+	 * Execution contexts allocated to this endpoint. An MP-pinned S-EL1 SP
+	 * needs as many execution contexts as there are physical cpus, whereas a
+	 * UP-migratable S-EL0 SP needs only one. Hence, space is wasted when only
+	 * a S-EL0 SP is deployed.
*/
- sp_context_t sp_ctx;
+ sp_exec_ctx_t ec[PLATFORM_CORE_COUNT];
/*
* ID of the Secure Partition
@@ -83,11 +129,6 @@ typedef struct spmc_sp_context {
uint32_t properties;
/*
- * Number of Execution Contexts.
- */
- uint16_t execution_ctx_count;
-
- /*
* Supported FFA Version
*/
uint32_t ffa_version;
@@ -95,19 +136,102 @@ typedef struct spmc_sp_context {
/*
* Execution State
*/
- uint32_t exception_state;
+ uint32_t execution_state;
+
+ /*
+ * Lock to protect the runtime state of a S-EL0 SP execution context.
+ */
+ spinlock_t rt_state_lock;
+
+ /*
+ * Pointer to translation table context of a S-EL0 SP.
+ */
+ xlat_ctx_t *xlat_ctx_handle;
+
+ /*
+ * Stack base of a S-EL0 SP.
+ */
+ uint64_t sp_stack_base;
+ /*
+ * Stack size of a S-EL0 SP.
+ */
+ uint64_t sp_stack_size;
-} spmc_sp_context_t;
+ /*
+ * Secondary entrypoint. Only valid for a S-EL1 SP.
+ */
+ uintptr_t secondary_ep;
+
+ /*
+ * Lock to protect the secondary entrypoint update in a SP descriptor.
+ */
+ spinlock_t secondary_ep_lock;
+} sp_desc_t;
+
+/*
+ * This define identifies the only SP that will be initialised and participate
+ * in FF-A communication. The implementation leaves the door open for more SPs
+ * to be managed in future but for now it is reasonable to assume that either a
+ * single S-EL0 or a single S-EL1 SP will be supported. This define will be used
+ * to identify which SP descriptor to initialise and manage during SP runtime.
+ */
+#define ACTIVE_SP_DESC_INDEX 0
+
+/*
+ * Structure to describe the cumulative properties of the Hypervisor and
+ * NS-Endpoints.
+ */
+typedef struct ns_endpoint_desc {
+ /*
+ * ID of the NS-Endpoint or Hypervisor
+ */
+ uint16_t ns_ep_id;
+
+ /*
+ * Mailbox tracking
+ */
+ struct mailbox mailbox;
+
+ /*
+ * Supported FFA Version
+ */
+ uint32_t ffa_version;
+
+} ns_ep_desc_t;
/**
* Holds information returned for each partition by the FFA_PARTITION_INFO_GET
* interface.
*/
struct ffa_partition_info {
- uint16_t vm_id;
+ uint16_t ep_id;
uint16_t execution_ctx_count;
uint32_t properties;
};
+/* Reference to power management hooks */
+extern const spd_pm_ops_t spmc_pm;
+
+/* Setup Function for different SP types. */
+void spmc_sp_common_setup(sp_desc_t *sp, entry_point_info_t *ep_info);
+void spmc_el0_sp_setup(sp_desc_t *sp, entry_point_info_t *ep_info);
+void spmc_el1_sp_setup(sp_desc_t *sp, entry_point_info_t *ep_info);
+
+/*
+ * Helper function to perform a synchronous entry into a SP.
+ */
+uint64_t spmc_sp_synchronous_entry(sp_exec_ctx_t *ec);
+
+/*
+ * Helper function to obtain the descriptor of the current SP on a physical cpu.
+ */
+sp_desc_t* spmc_get_current_sp_ctx();
+
+/*
+ * Helper function to obtain the index of the execution context of an SP on a
+ * physical cpu.
+ */
+unsigned int get_ec_index(sp_desc_t *sp);
+
#endif /* SPMC_H */
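get_ec_index() is declared above but its definition is not part of this diff. Based on the comments in secure_partition_desc, a plausible sketch is given below; it assumes plat_my_core_pos() selects the per-cpu context for an MP-pinned S-EL1 SP while a UP-migratable S-EL0 SP always uses context 0. The actual definition may differ.

/*
 * Illustrative sketch only -- the real get_ec_index() lives outside this diff.
 * Requires spmc.h for sp_desc_t and platform.h for plat_my_core_pos().
 */
unsigned int get_ec_index(sp_desc_t *sp)
{
	/* An MP-pinned S-EL1 SP has one execution context per physical cpu. */
	if (sp->runtime_el == EL1) {
		return plat_my_core_pos();
	}

	/* A UP-migratable S-EL0 SP only ever uses a single context. */
	return 0U;
}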
diff --git a/services/std_svc/spm/spmc/spmc_main.c b/services/std_svc/spm/spmc/spmc_main.c
index f1ad6ac87d..1878545309 100644
--- a/services/std_svc/spm/spmc/spmc_main.c
+++ b/services/std_svc/spm/spmc/spmc_main.c
@@ -23,7 +23,7 @@
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <services/logical_sp.h>
-
+#include <smccc_helpers.h>
#include <plat/arm/common/plat_arm.h>
#include <platform_def.h>
@@ -31,22 +31,20 @@
#include "spmc.h"
#include "spm_shim_private.h"
-#define NWLD_CTX_ID 0
-#define INVALID_PARTITION_ID 0x7FFF
-
-#define NWLD_CTX_INDEX 0
-#define SWLD_CTX_INDEX 0
-
-#define FFA_PARTITION_ID_BASE 0x8002
-
-static spmc_sp_context_t spmc_sp_ctx[SECURE_PARTITION_COUNT];
-static spmc_sp_context_t spmc_nwld_ctx[NWLD_PARTITION_COUNT];
+/*
+ * Allocate a secure partition descriptor to describe each SP in the system that
+ * does not reside at EL3.
+ */
+static sp_desc_t sp_desc[SECURE_PARTITION_COUNT];
-/* Reserve first ID for the normal world ctx. */
-static unsigned int next_available_sp_index = 0;
-static unsigned int schedule_sp_index = 0;
+/*
+ * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
+ * the system that interacts with a SP. It is used to track the Hypervisor
+ * buffer pair, version and ID for now. It could be extended to track VM
+ * properties when the SPMC supports indirect messaging.
+ */
+static ns_ep_desc_t ns_ep_desc[NS_PARTITION_COUNT];
-static void *spmc_manifest;
el3_lp_desc_t* get_el3_lp_array(void) {
el3_lp_desc_t *el3_lp_descs;
@@ -56,473 +54,88 @@ el3_lp_desc_t* get_el3_lp_array(void) {
}
/*
- * Initial implementation to obtain source of SMC ctx.
- * Currently assumes only single context per world.
- * TODO: Expand to track multiple partitions.
+ * Helper function to obtain the descriptor of the last SP to which control was
+ * handed on this physical cpu. Currently, we assume there is only one SP.
+ * TODO: Expand to track multiple partitions. In this case, the last S-EL0 SP on
+ * each physical cpu could be different.
*/
-spmc_sp_context_t* spmc_get_current_ctx(uint64_t flags) {
- if (is_caller_secure(flags)) {
- return &(spmc_sp_ctx[SWLD_CTX_INDEX]);
- }
- else {
- return &(spmc_nwld_ctx[NWLD_CTX_INDEX]);
- }
+sp_desc_t* spmc_get_current_sp_ctx() {
+ return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
}
/* Helper function to get pointer to SP context from it's ID. */
-spmc_sp_context_t* spmc_get_sp_ctx(uint16_t id) {
+sp_desc_t* spmc_get_sp_ctx(uint16_t id) {
/* Check for Swld Partitions. */
for (int i = 0; i < SECURE_PARTITION_COUNT; i++) {
- if (spmc_sp_ctx[i].sp_id == id) {
- return &(spmc_sp_ctx[i]);
- }
- }
- /* Check for Nwld partitions. */
- for (int i = 0; i < NWLD_PARTITION_COUNT; i++) {
- if (spmc_nwld_ctx[i].sp_id == id) {
- return &(spmc_nwld_ctx[i]);
+ if (sp_desc[i].sp_id == id) {
+ return &(sp_desc[i]);
}
}
return NULL;
}
-/*******************************************************************************
- * Return FFA_ERROR with specified error code
- ******************************************************************************/
-static uint64_t spmc_ffa_error_return(void *handle, int error_code)
-{
- SMC_RET8(handle, FFA_ERROR,
- FFA_TARGET_INFO_MBZ, error_code,
- FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
- FFA_PARAM_MBZ, FFA_PARAM_MBZ);
-}
-
-/*******************************************************************************
- * This function will parse the Secure Partition Manifest for fetching seccure
- * partition specific memory region details. It will find base address, size,
- * memory attributes for each memory region and then add the respective region
- * into secure parition's translation context.
- ******************************************************************************/
-static void populate_sp_mem_regions(sp_context_t *sp_ctx,
- void *sp_manifest,
- int node)
-{
- uintptr_t base_address, size;
- uint32_t mem_attr, granularity, mem_region;
- struct mmap_region sp_mem_regions;
- int32_t offset, ret;
-
- for (offset = fdt_first_subnode(sp_manifest, node), mem_region = 0;
- offset >= 0;
- offset = fdt_next_subnode(sp_manifest, offset), mem_region++) {
- if (offset < 0)
- WARN("Error happened in SPMC manifest bootargs reading\n");
- else {
- ret = fdt_get_reg_props_by_index(sp_manifest, offset,
- 0, &base_address,
- &size);
- if (ret < 0) {
- WARN("Missing reg property for Mem region %u.\n", mem_region);
- continue;
- }
-
- ret = fdt_read_uint32(sp_manifest,
- offset, "mem_region_access",
- &mem_attr);
- if (ret < 0) {
- WARN("Missing Mem region %u access attributes.\n", mem_region);
- continue;
- }
-
- sp_mem_regions.attr = MT_USER;
- if (mem_attr == MEM_CODE)
- sp_mem_regions.attr |= MT_CODE;
- else if (mem_attr == MEM_RO_DATA)
- sp_mem_regions.attr |= MT_RO_DATA;
- else if (mem_attr == MEM_RW_DATA)
- sp_mem_regions.attr |= MT_RW_DATA;
- else if (mem_attr == MEM_RO)
- sp_mem_regions.attr |= MT_RO;
- else if (mem_attr == MEM_RW)
- sp_mem_regions.attr |= MT_RW;
-
- ret = fdt_read_uint32(sp_manifest,
- offset, "mem_region_type",
- &mem_attr);
- if (ret < 0) {
- WARN("Missing Mem region %u type.\n", mem_region);
- continue;
- }
-
- if (mem_attr == MEM_DEVICE)
- sp_mem_regions.attr |= MT_DEVICE;
- else if (mem_attr == MEM_NON_CACHE)
- sp_mem_regions.attr |= MT_NON_CACHEABLE;
- else if (mem_attr == MEM_NORMAL)
- sp_mem_regions.attr |= MT_MEMORY;
-
- ret = fdt_read_uint32(sp_manifest,
- offset,
- "mem_region_secure",
- &mem_attr);
- if (ret < 0) {
- WARN("Missing Mem region %u secure state.\n", mem_region);
- continue;
- }
-
- if (mem_attr == MEM_SECURE)
- sp_mem_regions.attr |= MT_SECURE;
- else if (mem_attr == MEM_NON_SECURE)
- sp_mem_regions.attr |= MT_NS;
-
- ret = fdt_read_uint32(sp_manifest,
- offset, "granularity",
- &granularity);
- if (ret < 0) {
- WARN("Missing Mem region %u granularity.\n", mem_region);
- continue;
- }
- sp_mem_regions.base_pa = base_address;
- sp_mem_regions.base_va = base_address;
- sp_mem_regions.size = size;
- sp_mem_regions.granularity = granularity;
- mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
- &sp_mem_regions);
- }
- }
-}
-
/*
- * Convert from the traditional TF-A representation of a UUID,
- * big endian uint8 to little endian uint32 to be inline
- * with FF-A.
+ * Helper function to obtain the descriptor of the Hypervisor. We assume that
+ * the first descriptor is reserved for the Hypervisor.
*/
-void convert_uuid_endian(uint8_t *be_8, uint32_t *le_32) {
- for (int i = 0; i < 4; i++){
- le_32[i] = be_8[(i*4)+0] << 24 |
- be_8[(i*4)+1] << 16 |
- be_8[(i*4)+2] << 8 |
- be_8[(i*4)+3] << 0;
- }
+ns_ep_desc_t* spmc_get_hyp_ctx() {
+ return &(ns_ep_desc[0]);
}
-/*******************************************************************************
- * This function will parse the Secure Partition Manifest. From manifest, it
- * will fetch details for preparing Secure partition image context and secure
- * partition image boot arguments if any. Also if there are memory regions
- * present in secure partition manifest then it will invoke function to map
- * respective memory regions.
- ******************************************************************************/
-static int sp_manifest_parse(void *sp_manifest, int offset,
- sp_context_t *sp_ctx,
- entry_point_info_t *ep_info)
+/*
+ * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
+ * or the last SP that was run.
+ */
+struct mailbox *spmc_get_mbox_desc(uint64_t flags)
{
- int32_t ret, node;
- spmc_sp_context_t *ctx = &(spmc_sp_ctx[next_available_sp_index]);
-
- node = fdt_subnode_offset_namelen(sp_manifest, offset,
- "ffa-config",
- sizeof("ffa-config") - 1);
- if (node < 0)
- WARN("Not found any ffa-config for SP.\n");
- else {
- uint64_t config;
- uint32_t config_32;
-
- ret = fdt_read_uint32(sp_manifest, node,
- "partition_id", &config_32);
- if (ret)
- WARN("Missing Secure Partition ID.\n");
- else
- ctx->sp_id = config_32;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "sp_arg0", &config);
- if (ret)
- WARN("Missing Secure Partition arg0.\n");
- else
- ep_info->args.arg0 = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "sp_arg1", &config);
- if (ret)
- WARN("Missing Secure Partition arg1.\n");
- else
- ep_info->args.arg1 = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "sp_arg2", &config);
- if (ret)
- WARN("Missing Secure Partition arg2.\n");
- else
- ep_info->args.arg2 = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "sp_arg3", &config);
- if (ret)
- WARN("Missing Secure Partition arg3.\n");
- else
- ep_info->args.arg3 = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "load_address", &config);
- if (ret)
- WARN("Missing Secure Partition Entry Point.\n");
- else
- ep_info->pc = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "stack_base", &config);
- if (ret)
- WARN("Missing Secure Partition Stack Base.\n");
- else
- sp_ctx->sp_stack_base = config;
-
- ret = fdt_read_uint64(sp_manifest, node,
- "stack_size", &config);
- if (ret)
- WARN("Missing Secure Partition Stack Size.\n");
- else
- sp_ctx->sp_pcpu_stack_size = config;
-
- uint8_t be_uuid[16];
- ret = fdtw_read_uuid(sp_manifest, node, "uuid", 16,
- be_uuid);
- if (ret)
- WARN("Missing Secure Partition UUID.\n");
- else {
- /* Convert from BE to LE to store internally. */
- convert_uuid_endian(be_uuid, ctx->uuid);
- }
-
- ret = fdt_read_uint32(sp_manifest, node,
- "execution-ctx-count", &config_32);
- if (ret)
- WARN("Missing Secure Partition Execution Context Count.\n");
- else
- ctx->execution_ctx_count = config_32;
-
- ret = fdt_read_uint32(sp_manifest, node,
- "ffa-version", &config_32);
- if (ret)
- WARN("Missing Secure Partition FFA Version.\n");
- else
- ctx->ffa_version = config_32;
-
- ret = fdt_read_uint32(sp_manifest, node,
- "execution-state", &config_32);
- if (ret)
- WARN("Missing Secure Partition Execution State.\n");
- else
- ctx->ffa_version = config_32;
-
- ret = fdt_read_uint32(sp_manifest, node,
- "runtime-el", &config_32);
- if (ret)
- WARN("Missing SP Runtime EL information.\n");
- else {
- ctx->runtime_el = config_32;
- if (config_32 == 0) {
- /* Setup Secure Partition SPSR for S-EL0*/
- ep_info->spsr =
- SPSR_64(MODE_EL0, MODE_SP_EL0,
- DISABLE_ALL_EXCEPTIONS);
-
- sp_ctx->xlat_ctx_handle->xlat_regime =
- EL1_EL0_REGIME;
-
- /* This region contains the exception
- * vectors used at S-EL1.
- */
- mmap_region_t sel1_exception_vectors =
- MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
- SPM_SHIM_EXCEPTIONS_SIZE,
- MT_CODE | MT_SECURE | MT_PRIVILEGED);
- mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
- &sel1_exception_vectors);
- }
- else if (config_32 == 1) {
- /* Setup Secure Partition SPSR for S-EL1 */
- ep_info->spsr =
- SPSR_64(MODE_EL1, MODE_SP_ELX,
- DISABLE_ALL_EXCEPTIONS);
- sp_ctx->xlat_ctx_handle->xlat_regime =
- EL1_EL0_REGIME;
- }
- }
- }
-
- node = fdt_subnode_offset_namelen(sp_manifest, offset,
- "mem-regions",
- sizeof("mem-regions") - 1);
- if (node < 0)
- WARN("Not found mem-region configuration for SP.\n");
- else {
- populate_sp_mem_regions(sp_ctx, sp_manifest, node);
- }
-
- return 0;
+ /* Obtain the RX/TX buffer pair descriptor. */
+ if (is_caller_secure(flags))
+ return &(spmc_get_current_sp_ctx()->mailbox);
+ else
+ return &(spmc_get_hyp_ctx()->mailbox);
}
/*******************************************************************************
- * This function gets the Secure Partition Manifest base and maps the manifest
- * region.
- * Currently, one Secure partition manifest is considered and prepared the
- * Secure Partition context for the same.
- *
+ * This function returns to the place where spmc_sp_synchronous_entry() was
+ * called originally.
******************************************************************************/
-static int find_and_prepare_sp_context(void)
+__dead2 void spmc_sp_synchronous_exit(sp_exec_ctx_t *ec, uint64_t rc)
{
- void *sp_manifest;
- uintptr_t manifest_base, manifest_base_align;
- entry_point_info_t *next_image_ep_info;
- int32_t ret;
-
- next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
- if (next_image_ep_info == NULL) {
- WARN("TEST: No Secure Partition image provided by BL2\n");
- return -ENOENT;
- }
-
- sp_manifest = (void *)next_image_ep_info->args.arg0;
- if (sp_manifest == NULL) {
- WARN("Secure Partition(SP) manifest absent\n");
- return -ENOENT;
- }
-
- manifest_base = (uintptr_t)sp_manifest;
- manifest_base_align = page_align(manifest_base, UP);
-
- manifest_base_align = page_align(manifest_base, DOWN);
-
- /* Map the secure partition manifest region in the EL3 translation regime.
- * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
- * alignment the region of 1 PAGE_SIZE from manifest align base may not
- * completely accommodate the secure partition manifest region.
+ /*
+ * The SPM must have initiated the original request through a
+ * synchronous entry into the secure partition. Jump back to the
+ * original C runtime context with the value of rc in x0;
*/
- ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
- manifest_base_align,
- PAGE_SIZE * 2,
- MT_RO_DATA);
- if (ret != 0) {
- ERROR("Error while mapping SP manifest (%d).\n", ret);
- return ret;
- }
-
- ret = fdt_node_offset_by_compatible(sp_manifest, -1, "arm,ffa-manifest");
- if (ret < 0) {
- ERROR("Error happened in SP manifest reading.\n");
- return -EINVAL;
- } else {
- spmc_sp_context_t *ctx = &(spmc_sp_ctx[next_available_sp_index]);
- sp_context_t *sp_ctx = &(ctx->sp_ctx);
- cpu_context_t *cpu_ctx = &(sp_ctx->cpu_ctx);
-
- /* Assign translation tables context. */
- sp_ctx->xlat_ctx_handle = spm_get_sp_xlat_context();
+ spm_secure_partition_exit(ec->c_rt_ctx, rc);
- /*
- * Initialize CPU context
- * ----------------------
- */
- entry_point_info_t ep_info = {0};
-
- SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
-
- ret = sp_manifest_parse(sp_manifest, ret, sp_ctx, &ep_info);
- if (ret) {
- ERROR(" Error in Secure Partition(SP) manifest parsing.\n");
- return ret;
- }
-
- /* Assign FFA Partition ID if not already assigned */
- if (ctx->sp_id == INVALID_PARTITION_ID) {
- ctx->sp_id = FFA_PARTITION_ID_BASE + next_available_sp_index;
- }
-
- cm_setup_context(cpu_ctx, &ep_info);
- write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_SP_EL0,
- sp_ctx->sp_stack_base + sp_ctx->sp_pcpu_stack_size);
-
- schedule_sp_index = next_available_sp_index;
-
-
- /* TODO: Perform any common initialisation? */
- spm_sp_common_setup(sp_ctx);
-
- /* Call the appropriate initalisation function depending on partition type. */
- if (ctx->runtime_el == EL0) {
- spm_el0_sp_setup(sp_ctx);
- }
- else if (ctx->runtime_el == EL1) {
- spm_el1_sp_setup(sp_ctx);
- }
- else {
- ERROR("Unexpected runtime EL: %d\n", ctx->runtime_el);
- }
- next_available_sp_index++;
- }
-
- return 0;
-}
-
-
-static int32_t logical_sp_init(void)
-{
- uint64_t rc = 0;
-
- el3_lp_desc_t *el3_lp_descs;
- el3_lp_descs = get_el3_lp_array();
-
- INFO("Logical Secure Partition init start.\n");
- /* TODO: do some initialistion. */
- for (int i = 0; i < EL3_LP_DESCS_NUM; i++) {
- el3_lp_descs[i].init();
- }
-
- INFO("Secure Partition initialized.\n");
-
- return rc;
+ panic();
}
-static int32_t sp_init(void)
+/*******************************************************************************
+ * Return FFA_ERROR with specified error code
+ ******************************************************************************/
+static uint64_t spmc_ffa_error_return(void *handle, int error_code)
{
- uint64_t rc;
- spmc_sp_context_t *ctx;
- sp_context_t *sp_ctx;
-
- ctx = &(spmc_sp_ctx[schedule_sp_index]);
- sp_ctx = &(ctx->sp_ctx);
- sp_ctx->state = SP_STATE_RESET;
-
- INFO("Secure Partition (0x%x) init start.\n", ctx->sp_id);
-
- rc = spm_sp_synchronous_entry(sp_ctx);
- assert(rc == 0);
-
- sp_ctx->state = SP_STATE_IDLE;
-
- INFO("Secure Partition initialized.\n");
-
- return !rc;
+ SMC_RET8(handle, FFA_ERROR,
+ FFA_TARGET_INFO_MBZ, error_code,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
+ FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}
-
/*******************************************************************************
 * This function either forwards the request to the other world or returns
* with an ERET depending on the source of the call.
* Assuming if call is for a logical SP it has already been taken care of.
******************************************************************************/
-
static uint64_t spmc_smc_return(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *handle,
- void *cookie,
- uint64_t flags) {
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *handle,
+ void *cookie,
+ uint64_t flags) {
unsigned int cs;
@@ -557,29 +170,45 @@ bool compare_uuid(uint32_t *uuid1, uint32_t *uuid2) {
}
static uint64_t partition_info_get_handler(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
- void *cookie, void *handle, uint64_t flags)
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
{
int index, partition_count;
struct ffa_partition_info *info;
el3_lp_desc_t *el3_lp_descs = get_el3_lp_array();
-
+ struct mailbox *mbox;
uint32_t uuid[4];
uuid[0] = x1;
uuid[1] = x2;
uuid[2] = x3;
uuid[3] = x4;
- spmc_sp_context_t *ctx = spmc_get_current_ctx(flags);
- info = (struct ffa_partition_info *) ctx->mailbox.rx_buffer;
+ /* Obtain the RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(flags);
+
+ /*
+	 * If the caller has not registered its RX/TX pair then return
+ * the invalid parameters error code.
+ * TODO: Need a clarification in the FF-A spec for this.
+ */
+ if (0 == mbox->rx_buffer) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ info = (struct ffa_partition_info *) mbox->rx_buffer;
- spin_lock(&ctx->mailbox.lock);
- if (ctx->mailbox.state != MAILBOX_STATE_EMPTY) {
+ spin_lock(&mbox->lock);
+ if (mbox->state != MAILBOX_STATE_EMPTY) {
return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
}
- ctx->mailbox.state = MAILBOX_STATE_FULL;
- spin_unlock(&ctx->mailbox.lock);
+ mbox->state = MAILBOX_STATE_FULL;
+ spin_unlock(&mbox->lock);
partition_count = 0;
/* Deal with Logical Partitions. */
@@ -587,7 +216,7 @@ static uint64_t partition_info_get_handler(uint32_t smc_fid,
if (compare_uuid(uuid, el3_lp_descs[index].uuid) ||
(uuid[0] == 0 && uuid[1] == 0 && uuid[2] == 0 && uuid[3] == 0)) {
/* Found a matching UUID, populate appropriately. */
- info[partition_count].vm_id = el3_lp_descs[index].sp_id;
+ info[partition_count].ep_id = el3_lp_descs[index].sp_id;
info[partition_count].execution_ctx_count = PLATFORM_CORE_COUNT;
info[partition_count].properties = el3_lp_descs[index].properties;
partition_count++;
@@ -596,12 +225,15 @@ static uint64_t partition_info_get_handler(uint32_t smc_fid,
/* Deal with physical SP's. */
for(index = 0; index < SECURE_PARTITION_COUNT; index++){
- if (compare_uuid(uuid, spmc_sp_ctx[index].uuid) ||
+ unsigned int execution_ctx_count;
+ if (compare_uuid(uuid, sp_desc[index].uuid) ||
(uuid[0] == 0 && uuid[1] == 0 && uuid[2] == 0 && uuid[3] == 0)) {
/* Found a matching UUID, populate appropriately. */
- info[partition_count].vm_id = spmc_sp_ctx[index].sp_id;
- info[partition_count].execution_ctx_count = spmc_sp_ctx[index].execution_ctx_count;
- info[partition_count].properties = spmc_sp_ctx[index].properties;
+ info[partition_count].ep_id = sp_desc[index].sp_id;
+ /* Use the EL to determine the number of execution contexts */
+ execution_ctx_count = (sp_desc[index].runtime_el == EL0) ? 1: PLATFORM_CORE_COUNT;
+ info[partition_count].execution_ctx_count = execution_ctx_count;
+ info[partition_count].properties = sp_desc[index].properties;
partition_count++;
}
}
@@ -613,10 +245,9 @@ static uint64_t partition_info_get_handler(uint32_t smc_fid,
SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, partition_count);
}
-
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
+ bool secure_origin,
+ uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
@@ -624,33 +255,100 @@ static uint64_t direct_req_smc_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
-
+ uint16_t dst_id = FFA_RECEIVER(x1);
el3_lp_desc_t *el3_lp_descs;
el3_lp_descs = get_el3_lp_array();
+ sp_desc_t *sp;
+ unsigned int idx;
- /* Handle is destined for a Logical Partition. */
- uint16_t dst_id = FFA_RECEIVER(x1);
+ /* Direct request is destined for a Logical Partition. */
for (int i = 0; i < MAX_EL3_LP_DESCS_COUNT; i++) {
if (el3_lp_descs[i].sp_id == dst_id) {
return el3_lp_descs[i].direct_req(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
}
}
+
+ /*
+ * If the request was not targeted to a LSP then it is invalid since a
+ * SP cannot call into the Normal world and there is no other SP to call
+ * into. If there are other SPs in future then the partition runtime
+ * model would need to be validated as well.
+ */
+ if (secure_origin)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /* Check if the SP ID is valid */
+ sp = spmc_get_sp_ctx(dst_id);
+ if (NULL == sp)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /*
+ * Check that the target execution context is in a waiting state before
+ * forwarding the direct request to it.
+ */
+ idx = get_ec_index(sp);
+ if (sp->ec[idx].rt_state != RT_STATE_WAITING)
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+
+ /*
+ * Everything checks out so forward the request to the SP after updating
+ * its state and runtime model.
+ * TODO: Lock the context for a S-EL0 UP-M SP.
+ */
+ sp->ec[idx].rt_state = RT_STATE_RUNNING;
+ sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, handle, cookie, flags);
}
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *cookie,
- void *handle,
- uint64_t flags)
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
{
- /* Don't need to check LPs as they cannot send their own requests. */
+ sp_desc_t *sp;
+ unsigned int idx;
+
+ /* Check that the response did not originate from the Normal world */
+ if (!secure_origin)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /*
+ * Check that the response is either targeted to the Normal world or the
+ * SPMC e.g. a PM response.
+ */
+ if ((FFA_RECEIVER(x1) != FFA_SPMC_ID) && (FFA_RECEIVER(x1) & FFA_SWLD_ID_MASK))
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /* Obtain the SP descriptor and update its runtime state */
+ sp = spmc_get_sp_ctx(FFA_SENDER(x1));
+ if (NULL == sp)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /* Sanity check that the state is being tracked correctly in the SPMC */
+ idx = get_ec_index(sp);
+ assert (sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /* Ensure that the SP execution context was in the right runtime model */
+ if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ)
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+
+ /* Update the state of the SP execution context */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
- /* TODO: Update state tracking? Clear on-going direct request / current state to idle. */
+ /*
+	 * If the receiver is the SPMC then make a synchronous exit back to where
+	 * the SP was entered. Otherwise, forward the response to the Normal world.
+ */
+ if (FFA_RECEIVER(x1) == FFA_SPMC_ID) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here */
+ panic();
+ }
return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, handle, cookie, flags);
}
@@ -668,36 +366,49 @@ static uint64_t rxtx_map_handler(uint32_t smc_fid,
int ret;
uint32_t error_code;
uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
- spmc_sp_context_t *ctx = spmc_get_current_ctx(flags);
+ struct mailbox *mbox;
- spin_lock(&ctx->mailbox.lock);
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ * TODO: Check FF-A spec guidance on this scenario.
+ */
+ if (x1 == 0 || x2 == 0) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ /* Obtain the RX/TX buffer pair descriptor. */
+ mbox = spmc_get_mbox_desc(flags);
+
+ spin_lock(&mbox->lock);
/* Check if buffers have already been mapped. */
- if (ctx->mailbox.rx_buffer != 0 || ctx->mailbox.tx_buffer != 0) {
- WARN("%p %p\n", (void *) ctx->mailbox.rx_buffer, (void *)ctx->mailbox.tx_buffer);
+ if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
+ WARN("%p %p\n", (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
}
- ctx->mailbox.rxtx_page_count = x3 & 0x1F; /* Bits [5:0] */
+ mbox->rxtx_page_count = x3 & 0x1F; /* Bits [5:0] */
/* memmap the TX buffer as read only. */
ret = mmap_add_dynamic_region(x1, /* PA */
x1, /* VA */
- PAGE_SIZE * ctx->mailbox.rxtx_page_count, /* size */
+ PAGE_SIZE * mbox->rxtx_page_count, /* size */
mem_atts | MT_RO_DATA); /* attrs */
if (ret) {
/* Return the correct error code. */
error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY : FFA_ERROR_INVALID_PARAMETER;
WARN("Unable to map TX buffer: %d\n", error_code);
- ctx->mailbox.rxtx_page_count = 0;
+ mbox->rxtx_page_count = 0;
return spmc_ffa_error_return(handle, error_code);
}
- ctx->mailbox.tx_buffer = x1;
+ mbox->tx_buffer = x1;
/* memmap the RX buffer as read write. */
ret = mmap_add_dynamic_region(x2, /* PA */
x2, /* VA */
- PAGE_SIZE * ctx->mailbox.rxtx_page_count, /* size */
+ PAGE_SIZE * mbox->rxtx_page_count, /* size */
mem_atts | MT_RW_DATA); /* attrs */
if (ret) {
@@ -705,17 +416,17 @@ static uint64_t rxtx_map_handler(uint32_t smc_fid,
WARN("Unable to map RX buffer: %d\n", error_code);
goto err_unmap;
}
- ctx->mailbox.rx_buffer = x2;
- spin_unlock(&ctx->mailbox.lock);
+ mbox->rx_buffer = x2;
+ spin_unlock(&mbox->lock);
SMC_RET1(handle, FFA_SUCCESS_SMC32);
err_unmap:
/* Unmap the TX buffer again. */
- (void)mmap_remove_dynamic_region(ctx->mailbox.tx_buffer, PAGE_SIZE * ctx->mailbox.rxtx_page_count);
- ctx->mailbox.tx_buffer = 0;
- ctx->mailbox.rxtx_page_count = 0;
- spin_unlock(&ctx->mailbox.lock);
+ (void)mmap_remove_dynamic_region(mbox->tx_buffer, PAGE_SIZE * mbox->rxtx_page_count);
+ mbox->tx_buffer = 0;
+ mbox->rxtx_page_count = 0;
+ spin_unlock(&mbox->lock);
return spmc_ffa_error_return(handle, error_code);
}
@@ -730,25 +441,35 @@ static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
- spmc_sp_context_t *ctx = spmc_get_current_ctx(flags);
+ struct mailbox *mbox = spmc_get_mbox_desc(flags);
- spin_lock(&ctx->mailbox.lock);
+ /*
+ * The SPMC does not support mapping of VM RX/TX pairs to facilitate
+ * indirect messaging with SPs. Check if the Hypervisor has invoked this
+ * ABI on behalf of a VM and reject it if this is the case.
+ * TODO: Check FF-A spec guidance on this scenario.
+ */
+ if (x1 != 0) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+ }
+
+ spin_lock(&mbox->lock);
/* Check if buffers have already been mapped. */
- if (ctx->mailbox.rx_buffer != 0 || ctx->mailbox.tx_buffer != 0) {
- spin_unlock(&ctx->mailbox.lock);
+ if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
+ spin_unlock(&mbox->lock);
return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
}
/* unmap RX Buffer */
- (void)mmap_remove_dynamic_region(ctx->mailbox.rx_buffer, PAGE_SIZE * ctx->mailbox.rxtx_page_count);
- ctx->mailbox.rx_buffer = 0;
+ (void)mmap_remove_dynamic_region(mbox->rx_buffer, PAGE_SIZE * mbox->rxtx_page_count);
+ mbox->rx_buffer = 0;
/* unmap TX Buffer */
- (void)mmap_remove_dynamic_region(ctx->mailbox.tx_buffer, PAGE_SIZE * ctx->mailbox.rxtx_page_count);
- ctx->mailbox.tx_buffer = 0;
+ (void)mmap_remove_dynamic_region(mbox->tx_buffer, PAGE_SIZE * mbox->rxtx_page_count);
+ mbox->tx_buffer = 0;
- spin_unlock(&ctx->mailbox.lock);
+ spin_unlock(&mbox->lock);
SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
@@ -783,7 +504,8 @@ static uint64_t ffa_features_handler(uint32_t smc_fid,
case FFA_RXTX_UNMAP:
case FFA_MSG_RUN:
case FFA_MSG_WAIT:
- SMC_RET1(handle, FFA_SUCCESS_SMC32);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC64);
default:
return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
@@ -797,14 +519,14 @@ static uint64_t ffa_features_handler(uint32_t smc_fid,
}
static uint64_t ffa_version_handler(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *cookie,
- void *handle,
- uint64_t flags)
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
{
/*
* Ensure that both major and minor revision representation occupies at
@@ -819,8 +541,8 @@ static uint64_t ffa_version_handler(uint32_t smc_fid,
}
SMC_RET1(handle,
- FFA_VERSION_MAJOR << FFA_VERSION_MAJOR_SHIFT |
- FFA_VERSION_MINOR);
+ FFA_VERSION_MAJOR << FFA_VERSION_MAJOR_SHIFT |
+ FFA_VERSION_MINOR);
}
static uint64_t ffa_id_get_handler(uint32_t smc_fid,
@@ -833,8 +555,28 @@ static uint64_t ffa_id_get_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
- spmc_sp_context_t *ctx = spmc_get_current_ctx(flags);
- SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, ctx->sp_id);
+ if (is_caller_secure(flags)) {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, spmc_get_current_sp_ctx()->sp_id);
+ } else {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, spmc_get_hyp_ctx()->ns_ep_id);
+ }
+}
+
+static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ if (is_caller_secure(flags)) {
+ SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
+ } else {
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
}
static uint64_t ffa_run_handler(uint32_t smc_fid,
@@ -847,22 +589,123 @@ static uint64_t ffa_run_handler(uint32_t smc_fid,
void *handle,
uint64_t flags)
{
+ sp_desc_t *sp;
+ unsigned int idx, *rt_state, *rt_model;
/* Can only be called from the normal world. */
if (secure_origin) {
spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
}
- /* Cannot run "primary" partition. */
- if (FFA_RUN_TARGET(x1) == NWLD_CTX_ID) {
+ /* Cannot run a Normal world partition. */
+ if (!(FFA_RUN_TARGET(x1) & FFA_SWLD_ID_MASK)) {
spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
}
- /* TODO: Add verification are we running on the correct vcpu. */
+ /*
+ * Check that the context is not already running on a different
+	 * cpu. This is valid only for a S-EL0 SP.
+ */
+ sp = spmc_get_sp_ctx(FFA_RUN_TARGET(x1));
+ if (NULL == sp)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ idx = get_ec_index(sp);
+ rt_state = &sp->ec[idx].rt_state;
+ rt_model = &sp->ec[idx].rt_model;
+ if (*rt_state == RT_STATE_RUNNING)
+ return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
+
+ /*
+ * Sanity check that if the execution context was not waiting then it
+ * was either in the direct request or the run partition runtime model.
+ */
+ if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED)
+ assert(*rt_model == RT_MODEL_RUN || *rt_model == RT_MODEL_DIR_REQ);
+
+ /*
+ * If the context was waiting then update the partition runtime model.
+ */
+ if (*rt_state == RT_STATE_WAITING)
+ *rt_model = RT_MODEL_RUN;
+
+ /*
+ * Forward the request to the correct SP vCPU after updating
+ * its state.
+ * TODO: Lock the context in case of a S-EL0 UP-M SP.
+ */
+ *rt_state = RT_STATE_RUNNING;
return spmc_smc_return(smc_fid, secure_origin, FFA_RUN_TARGET(x1), 0, 0, 0, handle, cookie, flags);
}
+static uint64_t msg_wait_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ sp_desc_t *sp;
+ unsigned int idx;
+
+ /* Check that the response did not originate from the Normal world */
+ if (!secure_origin)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /*
+ * Get the descriptor of the SP that invoked FFA_MSG_WAIT.
+ */
+ sp = spmc_get_current_sp_ctx();
+ if (NULL == sp)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
+
+ /*
+ * Get the execution context of the SP that invoked FFA_MSG_WAIT.
+ */
+ idx = get_ec_index(sp);
+
+ /* Ensure that the SP execution context was in the right runtime model */
+ if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ)
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+
+ /* Sanity check that the state is being tracked correctly in the SPMC */
+ idx = get_ec_index(sp);
+ assert (sp->ec[idx].rt_state == RT_STATE_RUNNING);
+
+ /*
+ * Perform a synchronous exit if the partition was initialising. The
+ * state is updated after the exit.
+ */
+ if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
+ spmc_sp_synchronous_exit(&sp->ec[idx], x4);
+ /* Should not get here */
+ panic();
+ }
+
+ /* Update the state of the SP execution context */
+ sp->ec[idx].rt_state = RT_STATE_WAITING;
+
+ /* Resume normal world if a secure interrupt was handled */
+ if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
+ unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
+ unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
+
+ assert(secure_state_in == SECURE);
+ assert(secure_state_out == NON_SECURE);
+
+ cm_el1_sysregs_context_save(secure_state_in);
+ cm_el1_sysregs_context_restore(secure_state_out);
+ cm_set_next_eret_context(secure_state_out);
+ SMC_RET0(cm_get_context(secure_state_out));
+ }
+ /* Forward the response to the Normal world */
+ return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4, handle, cookie, flags);
+}
+
static uint64_t rx_release_handler(uint32_t smc_fid,
bool secure_origin,
uint64_t x1,
@@ -872,54 +715,507 @@ static uint64_t rx_release_handler(uint32_t smc_fid,
void *cookie,
void *handle,
uint64_t flags)
+{
+	struct mailbox *mbox = spmc_get_mbox_desc(flags);
+
+ spin_lock(&mbox->lock);
+ if (mbox->state != MAILBOX_STATE_FULL) {
+ return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
+ }
+ mbox->state = MAILBOX_STATE_EMPTY;
+ spin_unlock(&mbox->lock);
+
+ SMC_RET1(handle, FFA_SUCCESS_SMC32);
+}
+
+/*******************************************************************************
+ * spmc_pm_secondary_ep_register
+ ******************************************************************************/
+static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
{
+ sp_desc_t *sp;
+
+ /*
+ * This request cannot originate from the Normal world.
+ */
+ if (!secure_origin)
+ return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
- spmc_sp_context_t *ctx = spmc_get_current_ctx(flags);
+ /* Get the context of the current SP. */
+ sp = spmc_get_current_sp_ctx();
+ if (NULL == sp)
+ return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
- spin_lock(&ctx->mailbox.lock);
- if (ctx->mailbox.state != MAILBOX_STATE_FULL) {
+ /*
+ * A S-EL0 SP has no business invoking this ABI.
+ */
+ if (sp->runtime_el == EL0) {
return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
}
- ctx->mailbox.state = MAILBOX_STATE_EMPTY;
- spin_unlock(&ctx->mailbox.lock);
+
+ /*
+ * Lock and update the secondary entrypoint in SP context.
+ * TODO: Sanity check the entrypoint even though it does not matter
+ * since there is no isolation between EL3 and S-EL1.
+ */
+ spin_lock(&sp->secondary_ep_lock);
+ sp->secondary_ep = x1;
+ VERBOSE("%s %lx\n", __func__, sp->secondary_ep);
+ spin_unlock(&sp->secondary_ep_lock);
SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
/*******************************************************************************
- * SPMC Helper Functions
+ * This function will parse the Secure Partition Manifest for fetching secure
+ * partition specific memory region details. It will find the base address,
+ * size and memory attributes for each memory region and then add the
+ * respective region into the secure partition's translation context.
+ ******************************************************************************/
+static void populate_sp_mem_regions(sp_desc_t *sp,
+ void *sp_manifest,
+ int node)
+{
+ uintptr_t base_address, size;
+ uint32_t mem_attr, granularity, mem_region;
+ struct mmap_region sp_mem_regions;
+ int32_t offset, ret;
+
+ for (offset = fdt_first_subnode(sp_manifest, node), mem_region = 0;
+ offset >= 0;
+ offset = fdt_next_subnode(sp_manifest, offset), mem_region++) {
+ if (offset < 0)
+ WARN("Error happened in SPMC manifest bootargs reading\n");
+ else {
+ ret = fdt_get_reg_props_by_index(sp_manifest, offset,
+ 0, &base_address,
+ &size);
+ if (ret < 0) {
+ WARN("Missing reg property for Mem region %u.\n", mem_region);
+ continue;
+ }
+
+ ret = fdt_read_uint32(sp_manifest,
+ offset, "mem_region_access",
+ &mem_attr);
+ if (ret < 0) {
+ WARN("Missing Mem region %u access attributes.\n", mem_region);
+ continue;
+ }
+
+ sp_mem_regions.attr = MT_USER;
+ if (mem_attr == MEM_CODE)
+ sp_mem_regions.attr |= MT_CODE;
+ else if (mem_attr == MEM_RO_DATA)
+ sp_mem_regions.attr |= MT_RO_DATA;
+ else if (mem_attr == MEM_RW_DATA)
+ sp_mem_regions.attr |= MT_RW_DATA;
+ else if (mem_attr == MEM_RO)
+ sp_mem_regions.attr |= MT_RO;
+ else if (mem_attr == MEM_RW)
+ sp_mem_regions.attr |= MT_RW;
+
+ ret = fdt_read_uint32(sp_manifest,
+ offset, "mem_region_type",
+ &mem_attr);
+ if (ret < 0) {
+ WARN("Missing Mem region %u type.\n", mem_region);
+ continue;
+ }
+
+ if (mem_attr == MEM_DEVICE)
+ sp_mem_regions.attr |= MT_DEVICE;
+ else if (mem_attr == MEM_NON_CACHE)
+ sp_mem_regions.attr |= MT_NON_CACHEABLE;
+ else if (mem_attr == MEM_NORMAL)
+ sp_mem_regions.attr |= MT_MEMORY;
+
+ ret = fdt_read_uint32(sp_manifest,
+ offset,
+ "mem_region_secure",
+ &mem_attr);
+ if (ret < 0) {
+ WARN("Missing Mem region %u secure state.\n", mem_region);
+ continue;
+ }
+
+ if (mem_attr == MEM_SECURE)
+ sp_mem_regions.attr |= MT_SECURE;
+ else if (mem_attr == MEM_NON_SECURE)
+ sp_mem_regions.attr |= MT_NS;
+
+ ret = fdt_read_uint32(sp_manifest,
+ offset, "granularity",
+ &granularity);
+ if (ret < 0) {
+ WARN("Missing Mem region %u granularity.\n", mem_region);
+ continue;
+ }
+ sp_mem_regions.base_pa = base_address;
+ sp_mem_regions.base_va = base_address;
+ sp_mem_regions.size = size;
+ sp_mem_regions.granularity = granularity;
+ mmap_add_region_ctx(sp->xlat_ctx_handle,
+ &sp_mem_regions);
+ }
+ }
+}
+
+/*
+ * Convert from the traditional TF-A representation of a UUID,
+ * big endian uint8 to little endian uint32 to be in line
+ * with FF-A.
+ */
+void convert_uuid_endian(uint8_t *be_8, uint32_t *le_32) {
+ for (int i = 0; i < 4; i++){
+ le_32[i] = be_8[(i*4)+0] << 24 |
+ be_8[(i*4)+1] << 16 |
+ be_8[(i*4)+2] << 8 |
+ be_8[(i*4)+3] << 0;
+ }
+}
+
+/*******************************************************************************
+ * This function will parse the Secure Partition Manifest. From manifest, it
+ * will fetch details for preparing Secure partition image context and secure
+ * partition image boot arguments if any. Also if there are memory regions
+ * present in secure partition manifest then it will invoke function to map
+ * respective memory regions.
+ ******************************************************************************/
+static int sp_manifest_parse(void *sp_manifest, int offset,
+ sp_desc_t *sp,
+ entry_point_info_t *ep_info)
+{
+ int32_t ret, node;
+ uint64_t config;
+ uint32_t config_32;
+ uint8_t be_uuid[16];
+
+ /*
+ * Look for the mandatory fields that are expected to be present in
+ * both S-EL1 and S-EL0 SP manifests.
+ */
+ node = fdt_subnode_offset_namelen(sp_manifest, offset,
+ "ffa-config",
+ sizeof("ffa-config") - 1);
+ if (node < 0) {
+ ERROR("Not found any ffa-config for SP.\n");
+ return node;
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "runtime-el", &config_32);
+ if (ret) {
+ ERROR("Missing SP Runtime EL information.\n");
+ return ret;
+ } else
+ sp->runtime_el = config_32;
+
+ ret = fdtw_read_uuid(sp_manifest, node, "uuid", 16,
+ be_uuid);
+ if (ret) {
+ ERROR("Missing Secure Partition UUID.\n");
+ return ret;
+ } else {
+ /* Convert from BE to LE to store internally. */
+ convert_uuid_endian(be_uuid, sp->uuid);
+ }
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "ffa-version", &config_32);
+ if (ret) {
+ ERROR("Missing Secure Partition FFA Version.\n");
+ return ret;
+ } else
+ sp->ffa_version = config_32;
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "execution-state", &config_32);
+ if (ret) {
+ ERROR("Missing Secure Partition Execution State.\n");
+ return ret;
+ } else
+ sp->execution_state = config_32;
+
+ /*
+ * Look for the optional fields that are expected to be present in
+ * both S-EL1 and S-EL0 SP manifests.
+ */
+
+ ret = fdt_read_uint32(sp_manifest, node,
+ "partition_id", &config_32);
+ if (ret)
+ WARN("Missing Secure Partition ID.\n");
+ else
+ sp->sp_id = config_32;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "load_address", &config);
+ if (ret)
+ WARN("Missing Secure Partition Entry Point.\n");
+ else
+ ep_info->pc = config;
+
+ /*
+ * Look for the mandatory fields that are expected to be present in only
+ * a StMM S-EL0 SP manifest. We are assuming deployment of only a single
+ * StMM SP with the EL3 SPMC for now.
+ */
+ if (sp->runtime_el == EL0) {
+ ret = fdt_read_uint64(sp_manifest, node,
+ "sp_arg0", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition arg0.\n");
+ return ret;
+ } else
+ ep_info->args.arg0 = config;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "sp_arg1", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition arg1.\n");
+ return ret;
+ } else
+ ep_info->args.arg1 = config;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "sp_arg2", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition arg2.\n");
+ return ret;
+ } else
+ ep_info->args.arg2 = config;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "sp_arg3", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition arg3.\n");
+ return ret;
+ } else
+ ep_info->args.arg3 = config;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "stack_base", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition Stack Base.\n");
+ return ret;
+ } else
+ sp->sp_stack_base = config;
+
+ ret = fdt_read_uint64(sp_manifest, node,
+ "stack_size", &config);
+ if (ret) {
+ ERROR("Missing Secure Partition Stack Size.\n");
+ return ret;
+ } else
+ sp->sp_stack_size = config;
+ }
+
+ node = fdt_subnode_offset_namelen(sp_manifest, offset,
+ "mem-regions",
+ sizeof("mem-regions") - 1);
+ if (node < 0)
+ WARN("Not found mem-region configuration for SP.\n");
+ else {
+ populate_sp_mem_regions(sp, sp_manifest, node);
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function gets the Secure Partition Manifest base and maps the manifest
+ * region.
+ * Currently, only a single Secure Partition manifest is considered and the
+ * Secure Partition context is prepared from it.
+ *
******************************************************************************/
+static int find_and_prepare_sp_context(void)
+{
+ void *sp_manifest;
+ uintptr_t manifest_base, manifest_base_align;
+ entry_point_info_t *next_image_ep_info;
+ int32_t ret;
+ sp_desc_t *sp;
+ entry_point_info_t ep_info = {0};
+
+ next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
+ if (next_image_ep_info == NULL) {
+ WARN("TEST: No Secure Partition image provided by BL2\n");
+ return -ENOENT;
+ }
-void spmc_set_config_addr(uintptr_t soc_fw_config)
+ sp_manifest = (void *)next_image_ep_info->args.arg0;
+ if (sp_manifest == NULL) {
+ WARN("Secure Partition(SP) manifest absent\n");
+ return -ENOENT;
+ }
+
+ manifest_base = (uintptr_t)sp_manifest;
+ manifest_base_align = page_align(manifest_base, UP);
+ manifest_base_align = page_align(manifest_base, DOWN);
+
+ /* Map the secure partition manifest region in the EL3 translation regime.
+ * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
+ * alignment the region of 1 PAGE_SIZE from manifest align base may not
+ * completely accommodate the secure partition manifest region.
+ */
+ ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
+ manifest_base_align,
+ PAGE_SIZE * 2,
+ MT_RO_DATA);
+ if (ret != 0) {
+ ERROR("Error while mapping SP manifest (%d).\n", ret);
+ return ret;
+ }
+
+ ret = fdt_node_offset_by_compatible(sp_manifest, -1, "arm,ffa-manifest");
+ if (ret < 0) {
+ ERROR("Error happened in SP manifest reading.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate an SP descriptor for initialising the partition's execution
+ * context on the primary CPU.
+ */
+ sp = &(sp_desc[ACTIVE_SP_DESC_INDEX]);
+
+ /* Assign translation tables context. */
+	sp->xlat_ctx_handle = spm_get_sp_xlat_context();
+
+ /* Initialize entry point information for the SP */
+ SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
+
+ /* Parse the SP manifest. */
+ ret = sp_manifest_parse(sp_manifest, ret, sp, &ep_info);
+ if (ret) {
+ ERROR(" Error in Secure Partition(SP) manifest parsing.\n");
+ return ret;
+ }
+
+ /* Check that the runtime EL in the manifest was correct */
+ if (sp->runtime_el != EL0 && sp->runtime_el != EL1) {
+ ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
+ return -EINVAL;
+ }
+
+ /* Perform any initialisation common to S-EL0 and S-EL1 SP */
+ spmc_sp_common_setup(sp, &ep_info);
+
+ /* Perform any initialisation specific to S-EL0 or S-EL1 SP */
+	if (sp->runtime_el == EL0)
+ spmc_el0_sp_setup(sp, &ep_info);
+ else
+ spmc_el1_sp_setup(sp, &ep_info);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * This function takes an SP context pointer and performs a synchronous entry
+ * into it.
+ ******************************************************************************/
+uint64_t spmc_sp_synchronous_entry(sp_exec_ctx_t *ec)
+{
+ uint64_t rc;
+
+ assert(ec != NULL);
+
+ /* Assign the context of the SP to this CPU */
+ cm_set_context(&(ec->cpu_ctx), SECURE);
+
+ /* Restore the context assigned above */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /* Invalidate TLBs at EL1. */
+ tlbivmalle1();
+ dsbish();
+
+ /* Enter Secure Partition */
+ rc = spm_secure_partition_enter(&ec->c_rt_ctx);
+
+ /* Save secure state */
+ cm_el1_sysregs_context_save(SECURE);
+
+ return rc;
+}
+
+static int32_t logical_sp_init(void)
{
- spmc_manifest = (void *)soc_fw_config;
+ uint64_t rc = 0;
+
+ el3_lp_desc_t *el3_lp_descs;
+ el3_lp_descs = get_el3_lp_array();
+
+ INFO("Logical Secure Partition init start.\n");
+	/* TODO: do some initialisation. */
+ for (int i = 0; i < EL3_LP_DESCS_NUM; i++) {
+ el3_lp_descs[i].init();
+ }
+
+ INFO("Secure Partition initialized.\n");
+
+ return rc;
}
-void *spmc_get_config_addr(void)
+/*******************************************************************************
+ * SPMC Helper Functions
+ ******************************************************************************/
+static int32_t sp_init(void)
{
- return ((void *)spmc_manifest);
+ uint64_t rc;
+ sp_desc_t *sp;
+ sp_exec_ctx_t *ec;
+
+ sp = &(sp_desc[ACTIVE_SP_DESC_INDEX]);
+ ec = &sp->ec[get_ec_index(sp)];
+ ec->rt_model = RT_MODEL_INIT;
+ ec->rt_state = RT_STATE_RUNNING;
+
+ INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
+
+ rc = spmc_sp_synchronous_entry(ec);
+ assert(rc == 0);
+
+ ERROR("S-EL1 SP context on core%u is in %u state\n", get_ec_index(sp), ec->rt_state);
+ ec->rt_state = RT_STATE_WAITING;
+ ERROR("S-EL1 SP context on core%u is in %u state\n", get_ec_index(sp), ec->rt_state);
+
+ INFO("Secure Partition initialized.\n");
+
+ return !rc;
}
-void initalize_sp_ctx(void) {
- spmc_sp_context_t *ctx;
- for (int i = 0; i < SECURE_PARTITION_COUNT; i ++) {
- ctx = &spmc_sp_ctx[i];
- ctx->sp_id = INVALID_PARTITION_ID;
- ctx->mailbox.rx_buffer = 0;
- ctx->mailbox.tx_buffer = 0;
- ctx->mailbox.state = MAILBOX_STATE_EMPTY;
+static void initalize_sp_descs(void) {
+ sp_desc_t *sp;
+ for (int i = 0; i < SECURE_PARTITION_COUNT; i++) {
+ sp = &sp_desc[i];
+ sp->sp_id = INV_SP_ID;
+ sp->mailbox.rx_buffer = 0;
+ sp->mailbox.tx_buffer = 0;
+ sp->mailbox.state = MAILBOX_STATE_EMPTY;
+ sp->secondary_ep = 0;
}
}
-void initalize_nwld_ctx(void) {
- spmc_sp_context_t *ctx;
- for (int i = 0; i < NWLD_PARTITION_COUNT; i ++) {
- ctx = &spmc_nwld_ctx[i];
- // Initialise first entry to Nwld component with ID 0.
- ctx->sp_id = i ? INVALID_PARTITION_ID : 0;
- ctx->mailbox.rx_buffer = 0;
- ctx->mailbox.tx_buffer = 0;
- ctx->mailbox.state = MAILBOX_STATE_EMPTY;
+static void initalize_ns_ep_descs(void) {
+ ns_ep_desc_t *ns_ep;
+ for (int i = 0; i < NS_PARTITION_COUNT; i++) {
+ ns_ep = &ns_ep_desc[i];
+		/* Clashes with the Hypervisor ID but will not be a problem in practice */
+ ns_ep->ns_ep_id = 0;
+ ns_ep->mailbox.rx_buffer = 0;
+ ns_ep->mailbox.tx_buffer = 0;
+ ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
}
}
@@ -929,10 +1225,11 @@ void initalize_nwld_ctx(void) {
int32_t spmc_setup(void)
{
int32_t ret;
+ uint32_t flags;
- /* Initialize partiton ctxs. */
- initalize_sp_ctx();
- initalize_nwld_ctx();
+ /* Initialize endpoint descriptors */
+ initalize_sp_descs();
+ initalize_ns_ep_descs();
/* Setup logical SPs. */
logical_sp_init();
@@ -974,6 +1271,8 @@ uint64_t spmc_smc_handler(uint32_t smc_fid,
{
VERBOSE("SPMC: 0x%x 0x%llx 0x%llx 0x%llx 0x%llx\n", smc_fid, x1, x2, x3, x4);
switch (smc_fid) {
+ case FFA_SPM_ID_GET:
+ return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
case FFA_ID_GET:
return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
case FFA_FEATURES:
@@ -1002,13 +1301,13 @@ uint64_t spmc_smc_handler(uint32_t smc_fid,
return rx_release_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
case FFA_MSG_WAIT:
- /* Check if SP init call. */
- if (secure_origin && spmc_sp_ctx[schedule_sp_index].sp_ctx.state == SP_STATE_RESET) {
- spm_sp_synchronous_exit(&(spmc_sp_ctx[schedule_sp_index].sp_ctx), x4);
- }
- /* TODO: Validate this is a valid call in partitions current state. */
- /* Else forward to SPMD. */
- return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
+ /*
+		 * The Normal world cannot invoke this ABI to enter the Secure world.
+ */
+ if (!secure_origin)
+ break;
+
+ return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
case FFA_MSG_RUN:
return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4, cookie, handle, flags);
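The direct request/response handlers above maintain a small per-execution-context state machine: FFA_MSG_SEND_DIRECT_REQ moves a waiting context to RT_STATE_RUNNING with RT_MODEL_DIR_REQ, and the matching FFA_MSG_SEND_DIRECT_RESP moves it back to RT_STATE_WAITING. The standalone sketch below only illustrates those transitions; the toy_* helpers and main() are invented for demonstration and are not part of this patch.

/* Standalone illustration of the rt_state/rt_model tracking used above. */
#include <assert.h>
#include <stdio.h>

enum runtime_states { RT_STATE_WAITING, RT_STATE_RUNNING, RT_STATE_PREEMPTED, RT_STATE_BLOCKED };
enum runtime_model  { RT_MODEL_DIR_REQ, RT_MODEL_RUN, RT_MODEL_INIT, RT_MODEL_INTR };

struct toy_ec {
	enum runtime_states rt_state;
	enum runtime_model rt_model;
};

/* Mirrors the check in direct_req_smc_handler(): only a waiting context may run. */
static int toy_direct_req(struct toy_ec *ec)
{
	if (ec->rt_state != RT_STATE_WAITING)
		return -1; /* FFA_ERROR_BUSY in the real handler */
	ec->rt_state = RT_STATE_RUNNING;
	ec->rt_model = RT_MODEL_DIR_REQ;
	return 0;
}

/* Mirrors direct_resp_smc_handler(): the context must be running a direct request. */
static int toy_direct_resp(struct toy_ec *ec)
{
	assert(ec->rt_state == RT_STATE_RUNNING);
	if (ec->rt_model != RT_MODEL_DIR_REQ)
		return -1; /* FFA_ERROR_DENIED in the real handler */
	ec->rt_state = RT_STATE_WAITING;
	return 0;
}

int main(void)
{
	struct toy_ec ec = { RT_STATE_WAITING, RT_MODEL_RUN };

	printf("req:  %d\n", toy_direct_req(&ec));   /* 0: accepted */
	printf("req:  %d\n", toy_direct_req(&ec));   /* -1: busy, already running */
	printf("resp: %d\n", toy_direct_resp(&ec));  /* 0: back to waiting */
	return 0;
}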
diff --git a/services/std_svc/spmd/spmd_main.c b/services/std_svc/spmd/spmd_main.c
index c087c2eafe..cb8950d170 100644
--- a/services/std_svc/spmd/spmd_main.c
+++ b/services/std_svc/spmd/spmd_main.c
@@ -33,9 +33,19 @@
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
- * SPM Core attribute information read from its manifest.
+ * SPM Core attribute information is read from its manifest if the SPMC is not
+ * at EL3. Else, it is initialised statically.
******************************************************************************/
+#if SPMC_AT_EL3
+static spmc_manifest_attribute_t spmc_attrs = {
+ .major_version = FFA_VERSION_MAJOR,
+ .minor_version = FFA_VERSION_MINOR,
+ .exec_state = MODE_RW_64,
+ .spmc_id = 0x8000,
+};
+#else
static spmc_manifest_attribute_t spmc_attrs;
+#endif
/*******************************************************************************
* SPM Core entry point information. Discovered on the primary core and reused
@@ -306,48 +316,19 @@ static int spmd_spmc_init(void *pm_addr)
******************************************************************************/
int spmd_setup(void)
{
- uint64_t rc;
+ int rc;
void *spmc_manifest;
+ /*
+	 * If the SPMC is at EL3, then just initialise it directly. The extra
+	 * setup needed when it resides at a lower EL does not apply.
+ */
if (is_spmc_at_el3()) {
- spmd_spm_core_context_t *ctx = spmd_get_context();
- unsigned int linear_id = plat_my_core_pos();
- unsigned int core_id;
-
- VERBOSE("SPM Core init start.\n");
-
- ctx->state = SPMC_STATE_ON_PENDING;
-
- /* Set the SPMC context state on other CPUs to OFF */
- for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
- if (core_id != linear_id)
- spm_core_context[core_id].state = SPMC_STATE_OFF;
- }
-
- /* Get SPMC manifest address */
- spmc_manifest = spmc_get_config_addr();
- if (spmc_manifest == NULL) {
- ERROR("Invalid or absent SPM Core manifest.\n");
- return -EINVAL;
- }
-
- /* Load the SPM Core manifest */
- rc = plat_spm_core_manifest_load(&spmc_attrs, spmc_manifest);
- if (rc != 0) {
- WARN("No or invalid SPM Core manifest provided by BL2\n");
- return rc;
- }
-
rc = spmc_setup();
- if (rc != 0ULL) {
- ERROR("SPMC initialisation failed 0x%llx\n", rc);
- return 0;
+ if (rc != 0) {
+ ERROR("SPMC initialisation failed 0x%llx\n", (unsigned long long) rc);
}
-
- ctx->state = SPMC_STATE_ON;
-
- VERBOSE("SPM Core init end.\n");
- return 0;
+ return rc;
}
spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
@@ -382,20 +363,15 @@ int spmd_setup(void)
* Forward FFA SMCs to the other security state
******************************************************************************/
uint64_t ffa_smc_forward(uint32_t smc_fid,
- bool secure_origin,
- uint64_t x1,
- uint64_t x2,
- uint64_t x3,
- uint64_t x4,
- void *cookie,
- void *handle,
- uint64_t flags)
+ bool secure_origin,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
{
- if (is_spmc_at_el3()) {
- return spmc_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
- handle, flags);
- }
-
unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
@@ -551,7 +527,7 @@ uint64_t spmd_smc_handler(uint32_t smc_fid,
* Sanity check to "input_version".
*/
if ((input_version & FFA_VERSION_BIT31_MASK) ||
- (ctx->state == SPMC_STATE_RESET)) {
+ (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
ret = FFA_ERROR_NOT_SUPPORTED;
} else if (!secure_origin) {
ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
diff --git a/services/std_svc/spmd/spmd_private.h b/services/std_svc/spmd/spmd_private.h
index 6d51a58e07..63fd9aa0ed 100644
--- a/services/std_svc/spmd/spmd_private.h
+++ b/services/std_svc/spmd/spmd_private.h
@@ -44,7 +44,8 @@ typedef enum spmc_state {
/*
* Data structure used by the SPM dispatcher (SPMD) in EL3 to track context of
- * the SPM core (SPMC) at the next lower EL.
+ * the SPM core (SPMC) at the next lower EL. When the SPMC is at EL3, only the
+ * state field is used.
*/
typedef struct spmd_spm_core_context {
uint64_t c_rt_ctx;