feat(smc): add SMCCC v1.3 SVE hint bit support in TFTF framework
The TFTF SMC library uses the SVE field in the trap register to represent
the SVE hint flag.
A test case has to explicitly set this bit using the helper routine
tftf_smc_set_sve_hint(). When set to true, it denotes the absence of
SVE-specific live state on a CPU that implements SVE. Once set to true,
SVE will be disabled in the trap register and any SMC made using tftf_smc()
will set FUNCID_SVE_HINT in the SMC function ID.
Signed-off-by: Arunachalam Ganapathy <arunachalam.ganapathy@arm.com>
Change-Id: I13055fe4102cc4e35af1d7091e88327a21778835
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
index 5670afc..b2cd2a6 100644
--- a/include/lib/extensions/sve.h
+++ b/include/lib/extensions/sve.h
@@ -56,8 +56,11 @@
typedef uint8_t sve_ffr_regs_t[SVE_NUM_FFR_REGS * SVE_FFR_REG_LEN_BYTES]
__aligned(16);
+uint64_t sve_rdvl_1(void);
void sve_config_vq(uint8_t sve_vq);
uint32_t sve_probe_vl(uint8_t sve_max_vq);
+uint64_t sve_read_zcr_elx(void);
+void sve_write_zcr_elx(uint64_t rval);
void sve_z_regs_write(const sve_z_regs_t *z_regs);
void sve_z_regs_write_rand(sve_z_regs_t *z_regs);
@@ -83,23 +86,5 @@
void sve_subtract_arrays(int *dst_array, int *src_array1, int *src_array2,
int array_size);
-#ifdef __aarch64__
-
-/* Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN) */
-static inline uint64_t sve_rdvl_1(void)
-{
- uint64_t vl;
-
- __asm__ volatile(
- ".arch_extension sve\n"
- "rdvl %0, #1;"
- ".arch_extension nosve\n"
- : "=r" (vl)
- );
-
- return vl;
-}
-
-#endif /* __aarch64__ */
#endif /* __ASSEMBLY__ */
#endif /* SVE_H */
diff --git a/include/lib/tftf_lib.h b/include/lib/tftf_lib.h
index d265bb9..8eff7fc 100644
--- a/include/lib/tftf_lib.h
+++ b/include/lib/tftf_lib.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -159,6 +159,21 @@
*/
smc_ret_values tftf_smc(const smc_args *args);
+/* Assembler routine to trigger a SMC call. */
+smc_ret_values asm_tftf_smc64(uint32_t fid, u_register_t arg1, u_register_t arg2,
+ u_register_t arg3, u_register_t arg4,
+ u_register_t arg5, u_register_t arg6,
+ u_register_t arg7);
+
+/*
+ * Update the SVE hint for the current CPU. Any SMC call made through tftf_smc
+ * will update the SVE hint bit in the SMC Function ID.
+ */
+void tftf_smc_set_sve_hint(bool sve_hint_flag);
+
+/* Return the SVE hint bit value for the current CPU */
+bool tftf_smc_get_sve_hint(void);
+
/*
* Trigger an HVC call.
*/
diff --git a/include/runtime_services/smccc.h b/include/runtime_services/smccc.h
index 283b463..b898138 100644
--- a/include/runtime_services/smccc.h
+++ b/include/runtime_services/smccc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -34,16 +34,19 @@
#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
+#define FUNCID_SVE_HINT_SHIFT 16
#define FUNCID_NUM_SHIFT 0
#define FUNCID_TYPE_MASK 0x1
#define FUNCID_CC_MASK 0x1
#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_SVE_HINT_MASK 0x1
#define FUNCID_NUM_MASK 0xffff
#define FUNCID_TYPE_WIDTH 1
#define FUNCID_CC_WIDTH 1
#define FUNCID_OEN_WIDTH 6
+#define FUNCID_SVE_HINT_WIDTH 1
#define FUNCID_NUM_WIDTH 16
#define SMC_64 1
diff --git a/lib/extensions/sve/aarch64/sve.c b/lib/extensions/sve/aarch64/sve.c
index 8811af1..2c0e38f 100644
--- a/lib/extensions/sve/aarch64/sve.c
+++ b/lib/extensions/sve/aarch64/sve.c
@@ -8,50 +8,124 @@
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
+#include <lib/extensions/fpu.h>
#include <lib/extensions/sve.h>
+#include <tftf_lib.h>
-static inline uint64_t sve_read_zcr_elx(void)
-{
- return IS_IN_EL2() ? read_zcr_el2() : read_zcr_el1();
-}
+static uint8_t zero_mem[512];
-static inline void sve_write_zcr_elx(uint64_t reg_val)
-{
- if (IS_IN_EL2()) {
- write_zcr_el2(reg_val);
- } else {
- write_zcr_el1(reg_val);
- }
+#define sve_traps_save_disable(flags) \
+ do { \
+ if (IS_IN_EL2()) { \
+ flags = read_cptr_el2(); \
+ write_cptr_el2(flags & ~(CPTR_EL2_TZ_BIT)); \
+ } else { \
+ flags = read_cpacr_el1(); \
+ write_cpacr_el1(flags | \
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));\
+ } \
+ isb(); \
+ } while (false)
- isb();
-}
+#define sve_traps_restore(flags) \
+ do { \
+ if (IS_IN_EL2()) { \
+ write_cptr_el2(flags); \
+ } else { \
+ write_cpacr_el1(flags); \
+ } \
+ isb(); \
+ } while (false)
-static void _sve_config_vq(uint8_t sve_vq)
+static void config_vq(uint8_t sve_vq)
{
u_register_t zcr_elx;
- zcr_elx = sve_read_zcr_elx();
if (IS_IN_EL2()) {
+ zcr_elx = read_zcr_el2();
zcr_elx &= ~(MASK(ZCR_EL2_SVE_VL));
zcr_elx |= INPLACE(ZCR_EL2_SVE_VL, sve_vq);
+ write_zcr_el2(zcr_elx);
} else {
+ zcr_elx = read_zcr_el1();
zcr_elx &= ~(MASK(ZCR_EL1_SVE_VL));
zcr_elx |= INPLACE(ZCR_EL1_SVE_VL, sve_vq);
+ write_zcr_el1(zcr_elx);
}
- sve_write_zcr_elx(zcr_elx);
+ isb();
+}
+
+/* Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN) */
+uint64_t sve_rdvl_1(void)
+{
+ uint64_t vl;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ "rdvl %0, #1;"
+ ".arch_extension nosve\n"
+ : "=r" (vl)
+ );
+
+ sve_traps_restore(flags);
+ return vl;
+}
+
+uint64_t sve_read_zcr_elx(void)
+{
+ unsigned long flags;
+ uint64_t rval;
+
+ sve_traps_save_disable(flags);
+
+ if (IS_IN_EL2()) {
+ rval = read_zcr_el2();
+ } else {
+ rval = read_zcr_el1();
+ }
+
+ sve_traps_restore(flags);
+
+ return rval;
+}
+
+void sve_write_zcr_elx(uint64_t rval)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ if (IS_IN_EL2()) {
+ write_zcr_el2(rval);
+ } else {
+ write_zcr_el1(rval);
+ }
+ isb();
+
+ sve_traps_restore(flags);
+
+ return;
}
/* Set the SVE vector length in the current EL's ZCR_ELx register */
void sve_config_vq(uint8_t sve_vq)
{
+ unsigned long flags;
+
assert(is_armv8_2_sve_present());
+ sve_traps_save_disable(flags);
/* cap vq to arch supported max value */
if (sve_vq > SVE_VQ_ARCH_MAX) {
sve_vq = SVE_VQ_ARCH_MAX;
}
- _sve_config_vq(sve_vq);
+ config_vq(sve_vq);
+
+ sve_traps_restore(flags);
}
/*
@@ -65,8 +139,9 @@
{
uint32_t vl_bitmap = 0;
uint8_t vq, rdvl_vq;
+ unsigned long flags;
- assert(is_armv8_2_sve_present());
+ sve_traps_save_disable(flags);
/* cap vq to arch supported max value */
if (sve_max_vq > SVE_VQ_ARCH_MAX) {
@@ -74,7 +149,7 @@
}
for (vq = 0; vq <= sve_max_vq; vq++) {
- _sve_config_vq(vq);
+ config_vq(vq);
rdvl_vq = SVE_VL_TO_VQ(sve_rdvl_1());
if (vl_bitmap & BIT_32(rdvl_vq)) {
continue;
@@ -82,6 +157,8 @@
vl_bitmap |= BIT_32(rdvl_vq);
}
+ sve_traps_restore(flags);
+
return vl_bitmap;
}
@@ -89,7 +166,7 @@
* Write SVE Z[0-31] registers passed in 'z_regs' for Normal SVE or Streaming
* SVE mode
*/
-void sve_z_regs_write(const sve_z_regs_t *z_regs)
+static void z_regs_write(const sve_z_regs_t *z_regs)
{
__asm__ volatile(
".arch_extension sve\n"
@@ -130,10 +207,27 @@
}
/*
+ * Write SVE Z[0-31] registers passed in 'z_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+void sve_z_regs_write(const sve_z_regs_t *z_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ z_regs_write(z_regs);
+ sve_traps_restore(flags);
+}
+
+/*
* Read SVE Z[0-31] and store it in 'zregs' for Normal SVE or Streaming SVE mode
*/
void sve_z_regs_read(sve_z_regs_t *z_regs)
{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
__asm__ volatile(
".arch_extension sve\n"
read_sve_helper(0)
@@ -170,13 +264,11 @@
read_sve_helper(31)
".arch_extension nosve\n"
: : "r" (z_regs));
+
+ sve_traps_restore(flags);
}
-/*
- * Write SVE P[0-15] registers passed in 'p_regs' for Normal SVE or Streaming
- * SVE mode
- */
-void sve_p_regs_write(const sve_p_regs_t *p_regs)
+static void p_regs_write(const sve_p_regs_t *p_regs)
{
__asm__ volatile(
".arch_extension sve\n"
@@ -201,11 +293,28 @@
}
/*
+ * Write SVE P[0-15] registers passed in 'p_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+void sve_p_regs_write(const sve_p_regs_t *p_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ p_regs_write(p_regs);
+ sve_traps_restore(flags);
+}
+
+/*
* Read SVE P[0-15] registers and store it in 'p_regs' for Normal SVE or
* Streaming SVE mode
*/
void sve_p_regs_read(sve_p_regs_t *p_regs)
{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
__asm__ volatile(
".arch_extension sve\n"
read_sve_p_helper(0)
@@ -226,13 +335,11 @@
read_sve_p_helper(15)
".arch_extension nosve\n"
: : "r" (p_regs));
+
+ sve_traps_restore(flags);
}
-/*
- * Write SVE FFR registers passed in 'ffr_regs' for Normal SVE or Streaming SVE
- * mode
- */
-void sve_ffr_regs_write(const sve_ffr_regs_t *ffr_regs)
+static void ffr_regs_write(const sve_ffr_regs_t *ffr_regs)
{
uint8_t sve_p_reg[SVE_P_REG_LEN_BYTES];
@@ -250,12 +357,28 @@
}
/*
+ * Write SVE FFR registers passed in 'ffr_regs' for Normal SVE or Streaming SVE
+ * mode
+ */
+void sve_ffr_regs_write(const sve_ffr_regs_t *ffr_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ ffr_regs_write(ffr_regs);
+ sve_traps_restore(flags);
+}
+
+/*
* Read SVE FFR registers and store it in 'ffr_regs' for Normal SVE or Streaming
* SVE mode
*/
void sve_ffr_regs_read(sve_ffr_regs_t *ffr_regs)
{
uint8_t sve_p_reg[SVE_P_REG_LEN_BYTES];
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
/* Save p0. Read FFR to p0 and save p0 (ffr) to 'ffr_regs'. Restore p0 */
__asm__ volatile(
@@ -268,6 +391,8 @@
:
: "r" (ffr_regs), "r" (sve_p_reg)
: "memory");
+
+ sve_traps_restore(flags);
}
/*
@@ -279,6 +404,9 @@
uint32_t rval;
uint32_t z_size;
uint8_t *z_reg;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
z_size = (uint32_t)sve_rdvl_1();
@@ -290,7 +418,9 @@
memset((void *)z_reg, rval * (i + 1), z_size);
}
- sve_z_regs_write(z_regs);
+ z_regs_write(z_regs);
+
+ sve_traps_restore(flags);
}
/*
@@ -302,6 +432,9 @@
uint32_t p_size;
uint8_t *p_reg;
uint32_t rval;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
p_size = (uint32_t)sve_rdvl_1() / 8;
@@ -313,7 +446,9 @@
memset((void *)p_reg, rval * (i + 1), p_size);
}
- sve_p_regs_write(p_regs);
+ p_regs_write(p_regs);
+
+ sve_traps_restore(flags);
}
/*
@@ -325,6 +460,9 @@
uint32_t ffr_size;
uint8_t *ffr_reg;
uint32_t rval;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
ffr_size = (uint32_t)sve_rdvl_1() / 8;
@@ -335,7 +473,9 @@
memset((void *)ffr_reg, rval * (i + 1), ffr_size);
}
- sve_ffr_regs_write(ffr_regs);
+ ffr_regs_write(ffr_regs);
+
+ sve_traps_restore(flags);
}
/*
@@ -350,6 +490,7 @@
{
uint32_t z_size;
uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
/*
* 'rdvl' returns Streaming SVE VL if PSTATE.SM=1 else returns normal
@@ -357,11 +498,28 @@
*/
z_size = (uint32_t)sve_rdvl_1();
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
for (uint32_t i = 0U; i < SVE_NUM_VECTORS; i++) {
uint8_t *s1_z = (uint8_t *)s1 + (i * z_size);
uint8_t *s2_z = (uint8_t *)s2 + (i * z_size);
- if ((memcmp(s1_z, s2_z, z_size) == 0)) {
+ /*
+ * For Z register the comparison is successful when
+ * 1. whole Z register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the lower 128 bits of 's1' and 's2' is
+ * equal and remaining upper bits of 's2' is zero
+ */
+ if ((memcmp(s1_z, s2_z, z_size) == 0) ||
+ (sve_hint && (z_size > FPU_Q_SIZE) &&
+ (memcmp(s1_z, s2_z, FPU_Q_SIZE) == 0) &&
+ (memcmp(s2_z + FPU_Q_SIZE, zero_mem,
+ z_size - FPU_Q_SIZE) == 0))) {
continue;
}
@@ -384,15 +542,29 @@
{
uint32_t p_size;
uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
/* Size of one predicate register 1/8 of Z register */
p_size = (uint32_t)sve_rdvl_1() / 8U;
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
for (uint32_t i = 0U; i < SVE_NUM_P_REGS; i++) {
uint8_t *s1_p = (uint8_t *)s1 + (i * p_size);
uint8_t *s2_p = (uint8_t *)s2 + (i * p_size);
- if ((memcmp(s1_p, s2_p, p_size) == 0)) {
+ /*
+ * For P register the comparison is successful when
+ * 1. whole P register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the P register of 's2' is zero
+ */
+ if ((memcmp(s1_p, s2_p, p_size) == 0) ||
+ (sve_hint && (memcmp(s2_p, zero_mem, p_size) == 0))) {
continue;
}
@@ -415,15 +587,29 @@
{
uint32_t ffr_size;
uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
/* Size of one FFR register 1/8 of Z register */
ffr_size = (uint32_t)sve_rdvl_1() / 8U;
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
for (uint32_t i = 0U; i < SVE_NUM_FFR_REGS; i++) {
uint8_t *s1_ffr = (uint8_t *)s1 + (i * ffr_size);
uint8_t *s2_ffr = (uint8_t *)s2 + (i * ffr_size);
- if ((memcmp(s1_ffr, s2_ffr, ffr_size) == 0)) {
+ /*
+ * For FFR register the comparison is successful when
+ * 1. whole FFR register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the FFR register of 's2' is zero
+ */
+ if ((memcmp(s1_ffr, s2_ffr, ffr_size) == 0) ||
+ (sve_hint && (memcmp(s2_ffr, zero_mem, ffr_size) == 0))) {
continue;
}
diff --git a/lib/smc/aarch64/smc.c b/lib/smc/aarch64/smc.c
index 6667ee7..9912e72 100644
--- a/lib/smc/aarch64/smc.c
+++ b/lib/smc/aarch64/smc.c
@@ -1,24 +1,99 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch.h>
+#include <arch_features.h>
+#include <lib/extensions/sve.h>
#include <stdint.h>
+#include <smccc.h>
#include <tftf.h>
+#include <utils_def.h>
-smc_ret_values asm_tftf_smc64(uint32_t fid,
- u_register_t arg1,
- u_register_t arg2,
- u_register_t arg3,
- u_register_t arg4,
- u_register_t arg5,
- u_register_t arg6,
- u_register_t arg7);
+static void sve_enable(void)
+{
+ if (IS_IN_EL2()) {
+ write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TZ_BIT);
+ } else {
+ write_cpacr_el1(read_cpacr_el1() |
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));
+ }
+
+ isb();
+}
+
+static void sve_disable(void)
+{
+ if (IS_IN_EL2()) {
+ write_cptr_el2(read_cptr_el2() | CPTR_EL2_TZ_BIT);
+ } else {
+ unsigned long val = read_cpacr_el1();
+
+ val &= ~CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE);
+ val |= CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_ALL);
+ write_cpacr_el1(val);
+ }
+
+ isb();
+}
+
+static bool is_sve_enabled(void)
+{
+ if (IS_IN_EL2()) {
+ return ((read_cptr_el2() & CPTR_EL2_TZ_BIT) == 0UL);
+ } else {
+ return ((read_cpacr_el1() &
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE)) ==
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));
+ }
+}
+
+/*
+ * Use Trap control register SVE flags to represent SVE hint bit. On SVE capable
+ * CPU, setting sve_hint_flag = true denotes absence of SVE (disables SVE), else
+ * presence of SVE (enables SVE).
+ */
+void tftf_smc_set_sve_hint(bool sve_hint_flag)
+{
+ if (!is_armv8_2_sve_present()) {
+ return;
+ }
+
+ if (sve_hint_flag) {
+ sve_disable();
+ } else {
+ sve_enable();
+ }
+}
+
+/*
+ * On SVE capable CPU, return value of 'true' denotes SVE not used and return
+ * value of 'false' denotes SVE used.
+ *
+ * If the CPU does not support SVE, always return 'false'.
+ */
+bool tftf_smc_get_sve_hint(void)
+{
+ if (is_armv8_2_sve_present()) {
+ return is_sve_enabled() ? false : true;
+ }
+
+ return false;
+}
smc_ret_values tftf_smc(const smc_args *args)
{
- return asm_tftf_smc64(args->fid,
+ uint32_t fid = args->fid;
+
+ if (tftf_smc_get_sve_hint()) {
+ fid |= MASK(FUNCID_SVE_HINT);
+ } else {
+ fid &= ~MASK(FUNCID_SVE_HINT);
+ }
+
+ return asm_tftf_smc64(fid,
args->arg1,
args->arg2,
args->arg3,
diff --git a/spm/cactus/cactus_tests/cactus_test_interrupts.c b/spm/cactus/cactus_tests/cactus_test_interrupts.c
index 6a1092b..4250445 100644
--- a/spm/cactus/cactus_tests/cactus_test_interrupts.c
+++ b/spm/cactus/cactus_tests/cactus_test_interrupts.c
@@ -256,7 +256,17 @@
sp_register_interrupt_handler(sec_interrupt_test_espi_handled,
espi_id);
- ret = tftf_smc(&plat_sip_call);
+
+ /*
+ * Call the low level assembler routine to make the SMC call bypassing
+ * tftf_smc, as tftf_smc will set SVE hint bit in SMC FID when CPU
+ * supports SVE and SVE traps are enabled.
+ *
+	 * This can be changed to a tftf_smc call once the SPMC disregards the
+	 * SVE hint bit in the function identifier.
+ */
+ ret = asm_tftf_smc64(plat_sip_call.fid, plat_sip_call.arg1, 0, 0, 0,
+ 0, 0, 0);
if (ret.ret0 == SMC_UNKNOWN) {
ERROR("SiP SMC call not supported\n");
diff --git a/tftf/framework/aarch64/arch.c b/tftf/framework/aarch64/arch.c
index 0510678..f1223a1 100644
--- a/tftf/framework/aarch64/arch.c
+++ b/tftf/framework/aarch64/arch.c
@@ -7,6 +7,8 @@
#include <arch_features.h>
#include <arch_helpers.h>
+#include <arch_features.h>
+#include <tftf_lib.h>
void tftf_arch_setup(void)
{
@@ -39,5 +41,10 @@
write_smcr_el2(SMCR_EL2_RESET_VAL);
isb();
}
+
+ /* Clear SVE hint bit */
+ if (is_armv8_2_sve_present()) {
+ tftf_smc_set_sve_hint(false);
+ }
}
}