-rw-r--r--.readthedocs.yaml29
-rw-r--r--Makefile222
-rw-r--r--branch_protection.mk3
-rw-r--r--docs/about/contact.rst14
-rw-r--r--docs/about/features.rst5
-rw-r--r--docs/about/maintainers.rst22
-rw-r--r--docs/change-log.rst568
-rw-r--r--docs/getting_started/build-options.rst9
-rw-r--r--docs/getting_started/build.rst37
-rw-r--r--docs/getting_started/requirements.rst18
-rw-r--r--docs/index.rst1
-rw-r--r--docs/plat/index.rst18
-rw-r--r--docs/plat/xilinx-versal.rst53
-rw-r--r--docs/plat/xilinx-versal_net.rst46
-rw-r--r--docs/plat/xilinx-zynqmp.rst45
-rw-r--r--docs/requirements.txt2
-rw-r--r--drivers/arm/gic/arm_gic_v2.c6
-rw-r--r--drivers/arm/gic/arm_gic_v2v3.c19
-rw-r--r--drivers/arm/gic/gic_v3.c5
-rw-r--r--drivers/arm/sp805/sp805.c58
-rw-r--r--drivers/cadence/uart/aarch64/cdns_console.S201
-rw-r--r--el3_payload/Makefile4
-rw-r--r--el3_payload/plat/tc/platform.S (renamed from el3_payload/plat/tc0/platform.S)12
-rw-r--r--el3_payload/plat/tc/platform.h (renamed from el3_payload/plat/tc0/platform.h)4
-rw-r--r--el3_payload/plat/tc/platform.mk (renamed from el3_payload/plat/tc0/platform.mk)0
-rw-r--r--fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S15
-rw-r--r--fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S15
-rw-r--r--fwu/ns_bl1u/ns_bl1u.ld.S4
-rw-r--r--fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S15
-rw-r--r--fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S15
-rw-r--r--fwu/ns_bl2u/ns_bl2u.ld.S5
-rw-r--r--fwu/ns_bl2u/ns_bl2u.mk1
-rw-r--r--include/common/aarch64/asm_macros.S11
-rw-r--r--include/common/debug.h7
-rw-r--r--include/common/firmware_image_package.h2
-rw-r--r--include/common/test_helpers.h221
-rw-r--r--include/drivers/arm/arm_gic.h6
-rw-r--r--include/drivers/arm/gic_v3.h32
-rw-r--r--include/drivers/arm/sp805.h10
-rw-r--r--include/drivers/cadence/cdns_uart.h32
-rw-r--r--include/lib/aarch32/arch.h83
-rw-r--r--include/lib/aarch32/arch_features.h26
-rw-r--r--include/lib/aarch32/arch_helpers.h23
-rw-r--r--include/lib/aarch64/arch.h546
-rw-r--r--include/lib/aarch64/arch_features.h357
-rw-r--r--include/lib/aarch64/arch_helpers.h161
-rw-r--r--include/lib/aarch64/serror.h14
-rw-r--r--include/lib/aarch64/sync.h14
-rw-r--r--include/lib/extensions/fpu.h45
-rw-r--r--include/lib/extensions/pauth.h24
-rw-r--r--include/lib/extensions/sme.h48
-rw-r--r--include/lib/extensions/sve.h90
-rw-r--r--include/lib/heap/page_alloc.h39
-rw-r--r--include/lib/sprt/sprt_client.h49
-rw-r--r--include/lib/sprt/sprt_common.h38
-rw-r--r--include/lib/tftf_lib.h35
-rw-r--r--include/lib/transfer_list.h91
-rw-r--r--include/lib/utils_def.h24
-rw-r--r--include/lib/xlat_tables/xlat_tables_defs.h22
-rw-r--r--include/plat/common/common_def.h35
-rw-r--r--include/plat/common/plat_topology.h2
-rw-r--r--include/plat/common/platform.h9
-rw-r--r--include/runtime_services/arm_arch_svc.h3
-rw-r--r--include/runtime_services/cactus_message_loop.h12
-rw-r--r--include/runtime_services/cactus_test_cmds.h476
-rw-r--r--include/runtime_services/errata_abi.h87
-rw-r--r--include/runtime_services/ffa_endpoints.h19
-rw-r--r--include/runtime_services/ffa_helpers.h669
-rw-r--r--include/runtime_services/ffa_svc.h110
-rw-r--r--include/runtime_services/host_realm_managment/host_realm_helper.h58
-rw-r--r--include/runtime_services/host_realm_managment/host_realm_mem_layout.h60
-rw-r--r--include/runtime_services/host_realm_managment/host_realm_pmu.h29
-rw-r--r--include/runtime_services/host_realm_managment/host_realm_rmi.h613
-rw-r--r--include/runtime_services/host_realm_managment/host_realm_simd.h29
-rw-r--r--include/runtime_services/host_realm_managment/host_shared_data.h149
-rw-r--r--include/runtime_services/host_realm_managment/realm_def.h32
-rw-r--r--include/runtime_services/host_realm_managment/rmi_spm_tests.h15
-rw-r--r--include/runtime_services/psci.h7
-rw-r--r--include/runtime_services/secure_el1_payloads/tsp.h3
-rw-r--r--include/runtime_services/smccc.h5
-rw-r--r--include/runtime_services/spm_common.h91
-rw-r--r--include/runtime_services/spm_test_helpers.h119
-rw-r--r--include/runtime_services/sprt_svc.h74
-rw-r--r--include/runtime_services/trng.h19
-rw-r--r--lib/aarch64/exception_stubs.S39
-rw-r--r--lib/aarch64/misc_helpers.S5
-rw-r--r--lib/errata_abi/errata_abi.c54
-rw-r--r--lib/exceptions/aarch64/serror.c31
-rw-r--r--lib/exceptions/aarch64/sync.c45
-rw-r--r--lib/exceptions/irq.c (renamed from lib/irq/irq.c)0
-rw-r--r--lib/extensions/fpu/fpu.c169
-rw-r--r--lib/extensions/pauth/aarch64/pauth.c151
-rw-r--r--lib/extensions/sme/aarch64/sme.c147
-rw-r--r--lib/extensions/sme/aarch64/sme2_helpers.S73
-rw-r--r--lib/extensions/sme/aarch64/sme_helpers.S115
-rw-r--r--lib/extensions/sve/aarch64/sve.c621
-rw-r--r--lib/extensions/sve/aarch64/sve_helpers.S105
-rw-r--r--lib/heap/page_alloc.c101
-rw-r--r--lib/libc/snprintf.c3
-rw-r--r--lib/power_management/suspend/aarch64/asm_tftf_suspend.S11
-rw-r--r--lib/power_management/suspend/suspend_private.h12
-rw-r--r--lib/psci/psci.c13
-rw-r--r--lib/smc/aarch64/smc.c95
-rw-r--r--lib/sprt/aarch64/sprt_client_helpers.S41
-rw-r--r--lib/sprt/sprt_client.c80
-rw-r--r--lib/sprt/sprt_client.mk12
-rw-r--r--lib/sprt/sprt_client_private.h35
-rw-r--r--lib/sprt/sprt_queue.c104
-rw-r--r--lib/sprt/sprt_queue.h47
-rw-r--r--lib/transfer_list/transfer_list.c57
-rw-r--r--lib/xlat_tables_v2/aarch64/xlat_tables_arch.c28
-rw-r--r--make_helpers/build_macros.mk31
-rw-r--r--make_helpers/defaults.mk10
-rw-r--r--plat/arm/common/arm_fwu_io_storage.c3
-rw-r--r--plat/arm/corstone1000/corstone1000_def.h48
-rw-r--r--plat/arm/corstone1000/corstone1000_mem_prot.c23
-rw-r--r--plat/arm/corstone1000/corstone1000_pwr_state.c66
-rw-r--r--plat/arm/corstone1000/corstone1000_topology.c48
-rw-r--r--plat/arm/corstone1000/include/platform_def.h174
-rw-r--r--plat/arm/corstone1000/plat_helpers.S91
-rw-r--r--plat/arm/corstone1000/plat_setup.c40
-rw-r--r--plat/arm/corstone1000/platform.mk28
-rw-r--r--plat/arm/corstone1000/tests_to_skip.txt16
-rw-r--r--plat/arm/fvp/fvp_tests_to_skip.txt4
-rw-r--r--plat/arm/fvp/include/platform_def.h53
-rw-r--r--plat/arm/fvp/platform.mk31
-rw-r--r--plat/arm/juno/include/platform_def.h4
-rw-r--r--plat/arm/juno/juno32_tests_to_skip.txt3
-rw-r--r--plat/arm/juno/juno64_tests_to_skip.txt3
-rw-r--r--plat/arm/n1sdp/aarch64/plat_helpers.S90
-rw-r--r--plat/arm/n1sdp/include/platform_def.h158
-rw-r--r--plat/arm/n1sdp/n1sdp_mem_prot.c20
-rw-r--r--plat/arm/n1sdp/n1sdp_pwr_state.c43
-rw-r--r--plat/arm/n1sdp/n1sdp_topology.c52
-rw-r--r--plat/arm/n1sdp/plat_setup.c28
-rw-r--r--plat/arm/n1sdp/platform.mk38
-rw-r--r--plat/arm/n1sdp/tests_to_skip.txt33
-rw-r--r--plat/arm/neoverse_rd/common/arch/aarch64/plat_helpers.S (renamed from plat/arm/sgi/common/aarch64/plat_helpers.S)14
-rw-r--r--plat/arm/neoverse_rd/common/include/nrd_base_platform_def.h (renamed from plat/arm/sgi/common/include/sgi_base_platform_def.h)116
-rw-r--r--plat/arm/neoverse_rd/common/include/nrd_soc_css_def.h19
-rw-r--r--plat/arm/neoverse_rd/common/include/nrd_soc_css_def_v2.h20
-rw-r--r--plat/arm/neoverse_rd/common/include/nrd_soc_platform_def.h16
-rw-r--r--plat/arm/neoverse_rd/common/include/nrd_soc_platform_def_v2.h16
-rw-r--r--plat/arm/neoverse_rd/common/nrd_common.mk27
-rw-r--r--plat/arm/neoverse_rd/common/nrd_mem_prot.c20
-rw-r--r--plat/arm/neoverse_rd/common/nrd_pwr_state.c (renamed from plat/arm/sgi/common/sgi_pwr_state.c)22
-rw-r--r--plat/arm/neoverse_rd/common/plat_setup.c (renamed from plat/arm/sgi/common/plat_setup.c)8
-rw-r--r--plat/arm/neoverse_rd/platform/rdn1edge/include/platform_def.h25
-rw-r--r--plat/arm/neoverse_rd/platform/rdn1edge/platform.mk18
-rw-r--r--plat/arm/neoverse_rd/platform/rdn1edge/tests_to_skip.txt (renamed from plat/arm/rdinfra/rdn1edge/tests_to_skip.txt)5
-rw-r--r--plat/arm/neoverse_rd/platform/rdn1edge/topology.c (renamed from plat/arm/rdinfra/rdn1edge/topology.c)8
-rw-r--r--plat/arm/neoverse_rd/platform/rdn2/include/platform_def.h39
-rw-r--r--plat/arm/neoverse_rd/platform/rdn2/platform.mk21
-rw-r--r--plat/arm/neoverse_rd/platform/rdn2/tests_to_skip.txt16
-rw-r--r--plat/arm/neoverse_rd/platform/rdn2/topology.c111
-rw-r--r--plat/arm/neoverse_rd/platform/rdv1/include/platform_def.h25
-rw-r--r--plat/arm/neoverse_rd/platform/rdv1/platform.mk18
-rw-r--r--plat/arm/neoverse_rd/platform/rdv1/tests_to_skip.txt22
-rw-r--r--plat/arm/neoverse_rd/platform/rdv1/topology.c107
-rw-r--r--plat/arm/neoverse_rd/platform/sgi575/include/platform_def.h25
-rw-r--r--plat/arm/neoverse_rd/platform/sgi575/platform.mk18
-rw-r--r--plat/arm/neoverse_rd/platform/sgi575/sgi575_topology.c (renamed from plat/arm/sgi/sgi575/sgi575_topology.c)8
-rw-r--r--plat/arm/neoverse_rd/platform/sgi575/tests_to_skip.txt (renamed from plat/arm/sgi/sgi575/tests_to_skip.txt)5
-rw-r--r--plat/arm/rdinfra/rdn1edge/include/platform_def.h16
-rw-r--r--plat/arm/rdinfra/rdn1edge/platform.mk13
-rw-r--r--plat/arm/sgi/common/sgi_common.mk24
-rw-r--r--plat/arm/sgi/common/sgi_mem_prot.c20
-rw-r--r--plat/arm/sgi/sgi575/include/platform_def.h16
-rw-r--r--plat/arm/sgi/sgi575/platform.mk13
-rw-r--r--plat/arm/tc/aarch64/plat_helpers.S (renamed from plat/arm/tc0/aarch64/plat_helpers.S)12
-rw-r--r--plat/arm/tc/include/platform_def.h (renamed from plat/arm/tc0/include/platform_def.h)43
-rw-r--r--plat/arm/tc/plat_setup.c (renamed from plat/arm/tc0/plat_setup.c)6
-rw-r--r--plat/arm/tc/platform.mk43
-rw-r--r--plat/arm/tc/tc_mem_prot.c20
-rw-r--r--plat/arm/tc/tc_pwr_state.c (renamed from plat/arm/tc0/tc0_pwr_state.c)22
-rw-r--r--plat/arm/tc/tc_topology.c (renamed from plat/arm/tc0/tc0_topology.c)18
-rw-r--r--plat/arm/tc/tests_to_skip.txt (renamed from plat/arm/tc0/tests_to_skip.txt)10
-rw-r--r--plat/arm/tc0/platform.mk42
-rw-r--r--plat/arm/tc0/tc0_mem_prot.c20
-rw-r--r--plat/common/plat_topology.c5
-rw-r--r--plat/hisilicon/hikey960/tests.xml2
-rw-r--r--plat/nvidia/tegra186/include/platform_def.h1
-rw-r--r--plat/nvidia/tegra186/tests_to_skip.txt3
-rw-r--r--plat/nvidia/tegra194/include/platform_def.h1
-rw-r--r--plat/nvidia/tegra194/tests_to_skip.txt3
-rw-r--r--plat/nvidia/tegra210/include/platform_def.h1
-rw-r--r--plat/nvidia/tegra210/tests_to_skip.txt3
-rw-r--r--plat/xilinx/common/timer/timers.c168
-rw-r--r--plat/xilinx/versal/aarch64/plat_helpers.S73
-rw-r--r--plat/xilinx/versal/include/platform_def.h121
-rw-r--r--plat/xilinx/versal/platform.mk28
-rw-r--r--plat/xilinx/versal/tests_to_skip.txt54
-rw-r--r--plat/xilinx/versal/versal_pwr_state.c59
-rw-r--r--plat/xilinx/versal/versal_setup.c81
-rw-r--r--plat/xilinx/versal_net/aarch64/plat_helpers.S95
-rw-r--r--plat/xilinx/versal_net/include/platform_def.h127
-rw-r--r--plat/xilinx/versal_net/include/util.h29
-rw-r--r--plat/xilinx/versal_net/platform.mk28
-rw-r--r--plat/xilinx/versal_net/tests_to_skip.txt68
-rw-r--r--plat/xilinx/versal_net/versal_net_pwr_state.c59
-rw-r--r--plat/xilinx/versal_net/versal_net_setup.c88
-rw-r--r--plat/xilinx/zynqmp/aarch64/plat_helpers.S84
-rw-r--r--plat/xilinx/zynqmp/include/platform_def.h122
-rw-r--r--plat/xilinx/zynqmp/platform.mk37
-rw-r--r--plat/xilinx/zynqmp/tests_to_skip.txt65
-rw-r--r--plat/xilinx/zynqmp/zynqmp_pwr_state.c59
-rw-r--r--plat/xilinx/zynqmp/zynqmp_setup.c52
-rw-r--r--plat/xilinx/zynqmp/zynqmp_topology.c45
-rw-r--r--readme.rst2
-rw-r--r--realm/aarch64/realm_entrypoint.S120
-rw-r--r--realm/aarch64/realm_exceptions.S140
-rw-r--r--realm/include/realm_psci.h15
-rw-r--r--realm/include/realm_rsi.h140
-rw-r--r--realm/include/realm_tests.h32
-rw-r--r--realm/realm.ld.S79
-rw-r--r--realm/realm.mk69
-rw-r--r--realm/realm_debug.c61
-rw-r--r--realm/realm_exception_report.c50
-rw-r--r--realm/realm_interrupt.c27
-rw-r--r--realm/realm_multiple_rec.c117
-rw-r--r--realm/realm_pauth.c98
-rw-r--r--realm/realm_payload_main.c320
-rw-r--r--realm/realm_pmuv3.c331
-rw-r--r--realm/realm_psci.c98
-rw-r--r--realm/realm_rsi.c85
-rw-r--r--realm/realm_shared_data.c61
-rw-r--r--realm/realm_simd.c255
-rw-r--r--smc_fuzz/dts/sdei.dts14
-rw-r--r--smc_fuzz/dts/top.dts69
-rw-r--r--smc_fuzz/include/fifo3d.h12
-rw-r--r--smc_fuzz/include/fuzz_helper.h7
-rw-r--r--smc_fuzz/include/nfifo.h32
-rw-r--r--smc_fuzz/include/sdei_fuzz_helper.h39
-rw-r--r--smc_fuzz/include/smcmalloc.h3
-rw-r--r--smc_fuzz/include/tsp_fuzz_helper.h28
-rw-r--r--smc_fuzz/src/fifo3d.c48
-rw-r--r--smc_fuzz/src/nfifo.c91
-rw-r--r--smc_fuzz/src/randsmcmod.c230
-rw-r--r--smc_fuzz/src/runtestfunction_helpers.c18
-rw-r--r--smc_fuzz/src/sdei_fuzz_helper.c60
-rw-r--r--smc_fuzz/src/tsp_fuzz_helper.c43
-rw-r--r--spm/README.txt12
-rw-r--r--spm/cactus/aarch64/cactus_entrypoint.S46
-rw-r--r--spm/cactus/aarch64/cactus_exceptions.S39
-rw-r--r--spm/cactus/cactus.h7
-rw-r--r--spm/cactus/cactus.ld.S6
-rw-r--r--spm/cactus/cactus.mk44
-rw-r--r--spm/cactus/cactus_def.h39
-rw-r--r--spm/cactus/cactus_interrupt.c131
-rw-r--r--spm/cactus/cactus_main.c177
-rw-r--r--spm/cactus/cactus_tests.h44
-rw-r--r--spm/cactus/cactus_tests/SMMUv3TestEngine.h431
-rw-r--r--spm/cactus/cactus_tests/cactus_message_loop.c61
-rw-r--r--spm/cactus/cactus_tests/cactus_test_cpu_features.c29
-rw-r--r--spm/cactus/cactus_tests/cactus_test_direct_messaging.c46
-rw-r--r--spm/cactus/cactus_tests/cactus_test_ffa.c177
-rw-r--r--spm/cactus/cactus_tests/cactus_test_interrupts.c249
-rw-r--r--spm/cactus/cactus_tests/cactus_test_memory_sharing.c335
-rw-r--r--spm/cactus/cactus_tests/cactus_test_notifications.c174
-rw-r--r--spm/cactus/cactus_tests/cactus_test_simd.c53
-rw-r--r--spm/cactus/cactus_tests/cactus_tests_smmuv3.c175
-rw-r--r--spm/cactus/cactus_tests/smmuv3_test_engine.h1
-rw-r--r--spm/cactus/cactus_tests_memory_attributes.c224
-rw-r--r--spm/cactus/cactus_tests_system_setup.c71
-rw-r--r--spm/cactus/plat/arm/fvp/fdts/cactus-secondary.dts44
-rw-r--r--spm/cactus/plat/arm/fvp/fdts/cactus-tertiary.dts53
-rw-r--r--spm/cactus/plat/arm/fvp/fdts/cactus.dts84
-rw-r--r--spm/cactus/plat/arm/fvp/include/cactus_platform_def.h33
-rw-r--r--spm/cactus/plat/arm/fvp/include/sp_platform_def.h39
-rw-r--r--spm/cactus/plat/arm/tc/fdts/cactus-secondary.dts (renamed from spm/cactus/plat/arm/tc0/fdts/cactus-secondary.dts)24
-rw-r--r--spm/cactus/plat/arm/tc/fdts/cactus-tertiary.dts (renamed from spm/cactus/plat/arm/tc0/fdts/cactus-tertiary.dts)27
-rw-r--r--spm/cactus/plat/arm/tc/fdts/cactus.dts (renamed from spm/cactus/plat/arm/tc0/fdts/cactus.dts)29
-rw-r--r--spm/cactus/plat/arm/tc/include/sp_platform_def.h41
-rw-r--r--spm/cactus/plat/arm/tc/platform.mk17
-rw-r--r--spm/cactus/plat/arm/tc0/include/cactus_platform_def.h24
-rw-r--r--spm/cactus/plat/arm/tc0/platform.mk17
-rw-r--r--spm/cactus_mm/cactus_mm.mk3
-rw-r--r--spm/common/sp_debug.c (renamed from spm/cactus/cactus_debug.c)32
-rw-r--r--spm/common/sp_debug.h13
-rw-r--r--spm/common/sp_def.h46
-rw-r--r--spm/common/sp_helpers.c53
-rw-r--r--spm/common/sp_helpers.h30
-rw-r--r--spm/common/sp_tests/sp_test_ffa.c268
-rw-r--r--spm/common/sp_tests/sp_tests.h18
-rw-r--r--spm/common/spm_helpers.c40
-rw-r--r--spm/common/spm_helpers.h6
-rw-r--r--spm/include/sp_res_desc_def.h94
-rw-r--r--spm/ivy/aarch64/ivy_entrypoint.S27
-rw-r--r--spm/ivy/app/aarch64/ivy_entrypoint.S78
-rw-r--r--spm/ivy/app/ivy.h46
-rw-r--r--spm/ivy/app/ivy_def.h (renamed from spm/ivy/ivy_def.h)4
-rw-r--r--spm/ivy/app/ivy_main.c65
-rw-r--r--spm/ivy/app/plat/arm/fvp/fdts/ivy-sel0.dts30
-rw-r--r--spm/ivy/app/plat/arm/fvp/fdts/ivy-sel1.dts32
-rw-r--r--spm/ivy/app/plat/arm/fvp/include/sp_platform_def.h18
-rw-r--r--spm/ivy/app/plat/arm/fvp/platform.mk19
-rw-r--r--spm/ivy/app/plat/arm/tc/fdts/ivy-sel0.dts29
-rw-r--r--spm/ivy/app/plat/arm/tc/fdts/ivy-sel1.dts30
-rw-r--r--spm/ivy/app/plat/arm/tc/include/sp_platform_def.h18
-rw-r--r--spm/ivy/app/plat/arm/tc/platform.mk19
-rw-r--r--spm/ivy/ivy.dts95
-rw-r--r--spm/ivy/ivy.h29
-rw-r--r--spm/ivy/ivy.ld.S73
-rw-r--r--spm/ivy/ivy.mk89
-rw-r--r--spm/ivy/ivy_main.c135
-rw-r--r--spm/ivy/shim/aarch64/spm_shim_entrypoint.S83
-rw-r--r--spm/ivy/shim/aarch64/spm_shim_exceptions.S96
-rw-r--r--spm/ivy/shim/shim_main.c106
-rw-r--r--spm/quark/aarch64/quark_entrypoint.S27
-rw-r--r--spm/quark/quark.dts62
-rw-r--r--spm/quark/quark.h29
-rw-r--r--spm/quark/quark.ld.S55
-rw-r--r--spm/quark/quark.mk71
-rw-r--r--spm/quark/quark_def.h45
-rw-r--r--spm/quark/quark_main.c78
-rw-r--r--tftf/framework/aarch64/arch.c21
-rw-r--r--tftf/framework/aarch64/entrypoint.S30
-rw-r--r--tftf/framework/aarch64/exceptions.S71
-rw-r--r--tftf/framework/framework.mk18
-rw-r--r--tftf/framework/include/tftf.h2
-rw-r--r--tftf/framework/main.c33
-rw-r--r--tftf/framework/tftf.ld.S38
-rw-r--r--tftf/tests/aarch32_tests_to_skip.txt20
-rw-r--r--tftf/tests/common/test_helpers.c62
-rw-r--r--tftf/tests/extensions/afp/test_afp.c35
-rw-r--r--tftf/tests/extensions/brbe/test_brbe.c36
-rw-r--r--tftf/tests/extensions/fgt/test_fgt.c108
-rw-r--r--tftf/tests/extensions/hcx/test_hcx.c46
-rw-r--r--tftf/tests/extensions/mpam/test_mpam.c28
-rw-r--r--tftf/tests/extensions/pauth/test_pauth.c165
-rw-r--r--tftf/tests/extensions/pmuv3/test_pmuv3.c249
-rw-r--r--tftf/tests/extensions/rng_trap/test_rng_trap.c86
-rw-r--r--tftf/tests/extensions/sme/test_sme.c178
-rw-r--r--tftf/tests/extensions/sme/test_sme2.c113
-rw-r--r--tftf/tests/extensions/spe/test_spe.c47
-rw-r--r--tftf/tests/extensions/sve/sve_operations.S39
-rw-r--r--tftf/tests/extensions/sve/test_sve.c13
-rw-r--r--tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c59
-rw-r--r--tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h15
-rw-r--r--tftf/tests/extensions/trbe/test_trbe.c33
-rw-r--r--tftf/tests/extensions/trf/test_trf.c31
-rw-r--r--tftf/tests/extensions/wfxt/test_wfxt.c89
-rw-r--r--tftf/tests/misc_tests/inject_ras_error.S (renamed from tftf/tests/misc_tests/inject_serror.S)52
-rw-r--r--tftf/tests/misc_tests/test_ea_ffh.c84
-rw-r--r--tftf/tests/misc_tests/test_firmware_handoff.c56
-rw-r--r--tftf/tests/misc_tests/test_invalid_access.c364
-rw-r--r--tftf/tests/misc_tests/test_nop.c84
-rw-r--r--tftf/tests/misc_tests/test_ras_ffh_nested.c139
-rw-r--r--tftf/tests/misc_tests/test_ras_kfh.c52
-rw-r--r--tftf/tests/misc_tests/test_ras_kfh_reflect.c181
-rw-r--r--tftf/tests/misc_tests/test_single_fault.c21
-rw-r--r--tftf/tests/misc_tests/test_uncontainable.c6
-rw-r--r--tftf/tests/misc_tests/test_undef_injection.c70
-rw-r--r--tftf/tests/performance_tests/test_psci_latencies.c21
-rw-r--r--tftf/tests/plat/xilinx/common/plat_pm.c87
-rw-r--r--tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c164
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c240
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c496
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c1258
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/host_shared_data.c79
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c348
-rw-r--r--tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c485
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c792
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c1365
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c1824
-rw-r--r--tftf/tests/runtime_services/realm_payload/host_realm_spm.c413
-rw-r--r--tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S42
-rw-r--r--tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S62
-rw-r--r--tftf/tests/runtime_services/secure_service/ffa_helpers.c622
-rw-r--r--tftf/tests/runtime_services/secure_service/spm_common.c748
-rw-r--r--tftf/tests/runtime_services/secure_service/spm_test_helpers.c128
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c71
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c117
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_features.c47
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c73
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c585
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c1074
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_notifications.c1564
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c67
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c518
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c477
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_smccc.c165
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S240
-rw-r--r--tftf/tests/runtime_services/secure_service/test_ffa_version.c88
-rw-r--r--tftf/tests/runtime_services/secure_service/test_quark_request.c65
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c189
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c170
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_handle_open.c108
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c146
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c222
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c69
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_simd.c211
-rw-r--r--tftf/tests/runtime_services/secure_service/test_spm_smmu.c155
-rw-r--r--tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c739
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c484
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c8
-rw-r--r--tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c21
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S30
-rw-r--r--tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c288
-rw-r--r--tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c15
-rw-r--r--tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c75
-rw-r--r--tftf/tests/tbb-tests/tbb_test_infra.c45
-rw-r--r--tftf/tests/tbb-tests/tbb_test_infra.h15
-rw-r--r--tftf/tests/tbb-tests/test_tbb_corrupt_fip.c44
-rw-r--r--tftf/tests/tests-corrupt-fip.mk15
-rw-r--r--tftf/tests/tests-corrupt-fip.xml15
-rw-r--r--tftf/tests/tests-cpu-extensions.mk23
-rw-r--r--tftf/tests/tests-cpu-extensions.xml17
-rw-r--r--tftf/tests/tests-ea-ffh.mk7
-rw-r--r--tftf/tests/tests-ea-ffh.xml15
-rw-r--r--tftf/tests/tests-errata_abi.mk7
-rw-r--r--tftf/tests/tests-errata_abi.xml15
-rw-r--r--tftf/tests/tests-extensive.mk4
-rw-r--r--tftf/tests/tests-extensive.xml28
-rw-r--r--tftf/tests/tests-firmware-handoff.mk13
-rw-r--r--tftf/tests/tests-firmware-handoff.xml14
-rw-r--r--tftf/tests/tests-hcx.mk9
-rw-r--r--tftf/tests/tests-hcx.xml15
-rw-r--r--tftf/tests/tests-memory-access.mk30
-rw-r--r--tftf/tests/tests-memory-access.xml62
-rw-r--r--tftf/tests/tests-nop.mk9
-rw-r--r--tftf/tests/tests-nop.xml16
-rw-r--r--tftf/tests/tests-psci.xml12
-rw-r--r--tftf/tests/tests-quark.mk9
-rw-r--r--tftf/tests/tests-quark.xml19
-rw-r--r--tftf/tests/tests-ras-ffh-nested.mk10
-rw-r--r--tftf/tests/tests-ras-ffh-nested.xml13
-rw-r--r--tftf/tests/tests-ras-kfh-reflect.mk10
-rw-r--r--tftf/tests/tests-ras-kfh-reflect.xml14
-rw-r--r--tftf/tests/tests-ras-kfh.mk10
-rw-r--r--tftf/tests/tests-ras-kfh.xml13
-rw-r--r--tftf/tests/tests-realm-payload.mk45
-rw-r--r--tftf/tests/tests-realm-payload.xml111
-rw-r--r--tftf/tests/tests-rmi-spm.mk31
-rw-r--r--tftf/tests/tests-rmi-spm.xml18
-rw-r--r--tftf/tests/tests-rng_trap.mk9
-rw-r--r--tftf/tests/tests-rng_trap.xml16
-rw-r--r--tftf/tests/tests-sdei.mk3
-rw-r--r--tftf/tests/tests-sdei.xml3
-rw-r--r--tftf/tests/tests-single-fault.mk4
-rw-r--r--tftf/tests/tests-smcfuzzing.mk40
-rw-r--r--tftf/tests/tests-spm.mk37
-rw-r--r--tftf/tests/tests-spm.xml179
-rw-r--r--tftf/tests/tests-standard.mk14
-rw-r--r--tftf/tests/tests-standard.xml8
-rw-r--r--tftf/tests/tests-tftf-validation.xml1
-rw-r--r--tftf/tests/tests-timer-stress.mk10
-rw-r--r--tftf/tests/tests-timer-stress.xml15
-rw-r--r--tftf/tests/tests-trng.mk7
-rw-r--r--tftf/tests/tests-tsp.mk3
-rw-r--r--tftf/tests/tests-tsp.xml7
-rw-r--r--tftf/tests/tests-uncontainable.mk4
-rw-r--r--tftf/tests/tests-undef-injection.mk7
-rw-r--r--tftf/tests/tests-undef-injection.xml14
-rw-r--r--tftf/tests/tests-versal.mk12
-rw-r--r--tftf/tests/tests-versal.xml20
-rw-r--r--tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c3
-rwxr-xr-xtools/generate_dtb/generate_dtb.sh6
-rwxr-xr-xtools/generate_json/generate_json.sh107
-rwxr-xr-xtools/generate_test_list/generate_test_list.pl193
-rwxr-xr-xtools/generate_test_list/generate_test_list.py363
-rw-r--r--tools/generate_test_list/tests_list.c.tpl7
-rw-r--r--tools/generate_test_list/tests_list.h.tpl6
463 files changed, 36156 insertions, 5818 deletions
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 000000000..6e6fa8430
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,29 @@
+# Copyright (c) 2023, Arm Limited. All rights reserved
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Configuration file for the readthedocs deploy
+# Available at https://trustedfirmware-a.readthedocs.io/en/latest/
+
+
+# readthedocs config version
+version: 2
+
+build:
+  os: ubuntu-22.04    # Ubuntu Jammy LTS
+  tools:
+    python: "3.10"
+  apt_packages:
+    - plantuml
+
+python:
+  install:
+    - requirements: docs/requirements.txt
+
+sphinx:
+  configuration: docs/conf.py
+
+# Auxiliary formats to export to (in addition to the default HTML output).
+formats:
+  - pdf
+
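The Read the Docs configuration added above pins the documentation toolchain (Ubuntu 22.04, Python 3.10, packages from docs/requirements.txt). A rough local equivalent is sketched below; this is an assumed workflow rather than part of the change: the virtualenv path is arbitrary, and the repository's 'doc' make target (visible further down in this diff) is assumed to drive Sphinx using docs/conf.py.

    # Sketch: local docs build mirroring the Read the Docs setup above (assumed workflow).
    python3 -m venv .venv && . .venv/bin/activate    # any Python 3.10 environment will do
    pip install -r docs/requirements.txt             # same pinned packages as the RTD build
    make doc                                         # assumed to invoke Sphinx with docs/conf.py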
diff --git a/Makefile b/Makefile
index 8eba9403b..f88cd3f04 100644
--- a/Makefile
+++ b/Makefile
@@ -1,12 +1,12 @@
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# TFTF Version
VERSION_MAJOR := 2
-VERSION_MINOR := 5
+VERSION_MINOR := 10
MAKE_HELPERS_DIRECTORY := make_helpers/
include ${MAKE_HELPERS_DIRECTORY}build_macros.mk
@@ -55,6 +55,19 @@ else
endif
export Q
+################################################################################
+# Toolchain configs
+################################################################################
+CC := ${CROSS_COMPILE}gcc
+CPP := ${CROSS_COMPILE}cpp
+AS := ${CROSS_COMPILE}gcc
+AR := ${CROSS_COMPILE}ar
+LD := ${CROSS_COMPILE}ld
+OC := ${CROSS_COMPILE}objcopy
+OD := ${CROSS_COMPILE}objdump
+NM := ${CROSS_COMPILE}nm
+PP := ${CROSS_COMPILE}gcc
+
ifneq (${DEBUG}, 0)
BUILD_TYPE := debug
# Use LOG_LEVEL_INFO by default for debug builds
@@ -67,7 +80,7 @@ endif
# Default build string (git branch and commit)
ifeq (${BUILD_STRING},)
- BUILD_STRING := $(shell git log -n 1 --pretty=format:"%h")
+ BUILD_STRING := $(shell git describe --always --dirty --tags 2> /dev/null)
endif
VERSION_STRING := v${VERSION_MAJOR}.${VERSION_MINOR}(${PLAT},${BUILD_TYPE}):${BUILD_STRING}
@@ -94,6 +107,10 @@ ifeq ($(wildcard ${PLAT_MAKEFILE_FULL}),)
$(error "Error: Invalid platform. The following platforms are available: ${PLATFORMS}")
endif
+
+EL3_PAYLOAD_PLAT_PATH := $(shell find el3_payload/plat/ -wholename '*/${PLAT}')
+EL3_PAYLOAD_PLAT_MAKEFILE_FULL := ${EL3_PAYLOAD_PLAT_PATH}/${PLAT_MAKEFILE}
+
.PHONY: all
all: msg_start
@@ -102,22 +119,31 @@ msg_start:
@echo "Building ${PLAT}"
@echo "Selected set of tests: ${TESTS}"
+# Set flags for Realm Payload Tests
+ifeq (${ENABLE_REALM_PAYLOAD_TESTS},1)
+ARM_ARCH_MINOR := 5
+BRANCH_PROTECTION := 2
+endif
+
# Include test images makefiles.
include tftf/framework/framework.mk
include tftf/tests/tests.mk
include fwu/ns_bl1u/ns_bl1u.mk
include fwu/ns_bl2u/ns_bl2u.mk
-# Only platform fvp supports cactus_mm, ivy, quark
+# List of secure partitions present.
+SECURE_PARTITIONS :=
+
+# Only platform fvp supports cactus_mm
ifeq (${ARCH}-${PLAT},aarch64-fvp)
include spm/cactus_mm/cactus_mm.mk
-include spm/ivy/ivy.mk
-include spm/quark/quark.mk
+include realm/realm.mk
endif
-# cactus is supported on platforms: fvp, tc0
-ifeq (${ARCH}-${PLAT},$(filter ${ARCH}-${PLAT},aarch64-fvp aarch64-tc0))
+# cactus and ivy are supported on platforms: fvp, tc
+ifeq (${ARCH}-${PLAT},$(filter ${ARCH}-${PLAT},aarch64-fvp aarch64-tc))
include spm/cactus/cactus.mk
+include spm/ivy/ivy.mk
endif
################################################################################
@@ -141,6 +167,9 @@ $(eval $(call assert_boolean,FIRMWARE_UPDATE))
$(eval $(call assert_boolean,FWU_BL_TEST))
$(eval $(call assert_boolean,NEW_TEST_SESSION))
$(eval $(call assert_boolean,USE_NVM))
+$(eval $(call assert_numeric,BRANCH_PROTECTION))
+$(eval $(call assert_boolean,ENABLE_REALM_PAYLOAD_TESTS))
+$(eval $(call assert_boolean,TRANSFER_LIST))
################################################################################
# Process build options
@@ -165,17 +194,21 @@ $(eval $(call add_define,TFTF_DEFINES,LOG_LEVEL))
$(eval $(call add_define,TFTF_DEFINES,NEW_TEST_SESSION))
$(eval $(call add_define,TFTF_DEFINES,PLAT_${PLAT}))
$(eval $(call add_define,TFTF_DEFINES,USE_NVM))
+$(eval $(call add_define,TFTF_DEFINES,ENABLE_REALM_PAYLOAD_TESTS))
+$(eval $(call add_define,TFTF_DEFINES,TRANSFER_LIST))
################################################################################
+################################################################################
# Assembler, compiler and linker flags shared across all test images.
+################################################################################
COMMON_ASFLAGS :=
COMMON_CFLAGS :=
COMMON_LDFLAGS :=
ifeq (${DEBUG},1)
-COMMON_CFLAGS += -g
-COMMON_ASFLAGS += -g -Wa,--gdwarf-2
+COMMON_CFLAGS += -g -gdwarf-4
+COMMON_ASFLAGS += -g -Wa,--gdwarf-4
endif
# Set the compiler's target architecture profile based on ARM_ARCH_MINOR option
@@ -201,36 +234,68 @@ endif
$(info Arm Architecture Features specified: $(subst +, ,$(arch-features)))
endif # arch-features
-COMMON_ASFLAGS_aarch64 := -mgeneral-regs-only ${march64-directive}
-COMMON_CFLAGS_aarch64 := -mgeneral-regs-only -mstrict-align ${march64-directive}
+################################################################################
+# Compiler settings
+################################################################################
+ifneq ($(findstring clang,$(notdir $(CC))),)
+CLANG_CFLAGS_aarch64 := -target aarch64-elf
+
+CPP := $(CC) -E $(COMMON_CFLAGS_$(ARCH))
+PP := $(CC) -E $(COMMON_CFLAGS_$(ARCH))
+
+CLANG_WARNINGS += -nostdinc -ffreestanding -Wall \
+ -Wmissing-include-dirs $(CLANG_CFLAGS_$(ARCH)) \
+ -Wlogical-op-parentheses \
+ -Wno-initializer-overrides \
+ -Wno-sometimes-uninitialized \
+ -Wno-unused-function \
+ -Wno-unused-variable \
+ -Wno-unused-parameter \
+ -Wno-tautological-compare \
+ -Wno-memset-transposed-args \
+ -Wno-parentheses
+
+CLANG_CFLAGS += -Wno-error=deprecated-declarations \
+ -Wno-error=cpp \
+ $(CLANG_WARNINGS)
+endif #(clang)
+
+ifneq ($(findstring gcc,$(notdir $(CC))),)
+GCC_CFLAGS_aarch32 := ${march32-directive} -mno-unaligned-access
+GCC_CFLAGS_aarch64 := -mgeneral-regs-only
+
+GCC_ASFLAGS_aarch32 := ${march32-directive}
+GCC_ASFLAGS_aarch64 := -mgeneral-regs-only ${march64-directive}
+
+GCC_WARNINGS += -nostdinc -ffreestanding -Wall -Werror \
+ -Wmissing-include-dirs $(GCC_CFLAGS_$(ARCH)) \
+ -std=gnu99 -Os
+
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105523
+GCC_CFLAGS += $(call cc_option, --param=min-pagesize=0)
+GCC_CFLAGS += $(GCC_WARNINGS)
+endif #(gcc)
-COMMON_ASFLAGS_aarch32 := ${march32-directive}
-COMMON_CFLAGS_aarch32 := ${march32-directive} -mno-unaligned-access
+COMMON_CFLAGS_aarch64 += ${march64-directive} -mstrict-align \
+ $(CLANG_CFLAGS_$(ARCH)) $(GCC_CFLAGS_$(ARCH))
-COMMON_ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
- -Werror -Wmissing-include-dirs \
- -D__ASSEMBLY__ $(COMMON_ASFLAGS_$(ARCH)) \
- ${INCLUDES}
-COMMON_CFLAGS += -nostdinc -ffreestanding -Wall -Werror \
- -Wmissing-include-dirs $(COMMON_CFLAGS_$(ARCH)) \
- -std=gnu99 -Os
+COMMON_CFLAGS += $(COMMON_CFLAGS_$(ARCH))
COMMON_CFLAGS += -ffunction-sections -fdata-sections
# Get the content of CFLAGS user defined value last so they are appended after
# the options defined in the Makefile
-COMMON_CFLAGS += ${CFLAGS} ${INCLUDES}
+COMMON_CFLAGS += ${CLANG_CFLAGS} ${GCC_CFLAGS} ${INCLUDES}
+
+COMMON_ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
+ -Werror -Wmissing-include-dirs \
+ -D__ASSEMBLY__ $(GCC_ASFLAGS_$(ARCH)) \
+ ${INCLUDES}
-COMMON_LDFLAGS += --fatal-warnings -O1 --gc-sections --build-id=none
+COMMON_LDFLAGS += ${LDFLAGS} --fatal-warnings -O1 --gc-sections --build-id=none
-CC := ${CROSS_COMPILE}gcc
-CPP := ${CROSS_COMPILE}cpp
-AS := ${CROSS_COMPILE}gcc
-AR := ${CROSS_COMPILE}ar
-LD := ${CROSS_COMPILE}ld
-OC := ${CROSS_COMPILE}objcopy
-OD := ${CROSS_COMPILE}objdump
-NM := ${CROSS_COMPILE}nm
-PP := ${CROSS_COMPILE}gcc
+# ld.bfd version 2.39 and newer adds new warnings. Skip those since we are not
+# loaded by an ELF loader.
+COMMON_LDFLAGS += $(call ld_option, --no-warn-rwx-segments)
################################################################################
@@ -248,7 +313,7 @@ NS_BL2U_CFLAGS += -mbranch-protection=${BP_OPTION}
CACTUS_MM_CFLAGS += -mbranch-protection=${BP_OPTION}
CACTUS_CFLAGS += -mbranch-protection=${BP_OPTION}
IVY_CFLAGS += -mbranch-protection=${BP_OPTION}
-QUARK_CFLAGS += -mbranch-protection=${BP_OPTION}
+REALM_CFLAGS += -mbranch-protection=${BP_OPTION}
endif
ifeq ($(SMC_FUZZING), 1)
@@ -289,15 +354,14 @@ CACTUS_LDFLAGS += ${COMMON_LDFLAGS} $(PIE_LDFLAGS)
IVY_SOURCES += ${LIBC_SRCS}
IVY_INCLUDES += ${PLAT_INCLUDES}
-IVY_CFLAGS += ${COMMON_CFLAGS}
+IVY_CFLAGS += ${COMMON_CFLAGS} -fpie
IVY_ASFLAGS += ${COMMON_ASFLAGS}
-IVY_LDFLAGS += ${COMMON_LDFLAGS}
+IVY_LDFLAGS += ${COMMON_LDFLAGS} $(PIE_LDFLAGS)
-QUARK_SOURCES += ${LIBC_SRCS}
-QUARK_INCLUDES += ${PLAT_INCLUDES}
-QUARK_CFLAGS += ${COMMON_CFLAGS}
-QUARK_ASFLAGS += ${COMMON_ASFLAGS}
-QUARK_LDFLAGS += ${COMMON_LDFLAGS}
+REALM_SOURCES += ${LIBC_SRCS}
+REALM_CFLAGS += ${COMMON_CFLAGS} -fpie
+REALM_ASFLAGS += ${COMMON_ASFLAGS}
+REALM_LDFLAGS += ${COMMON_LDFLAGS} $(PIE_LDFLAGS)
.PHONY: locate-checkpatch
locate-checkpatch:
@@ -313,14 +377,18 @@ endif
clean:
@echo " CLEAN"
${Q}rm -rf ${BUILD_PLAT}
+ifneq ($(wildcard ${EL3_PAYLOAD_PLAT_MAKEFILE_FULL}),)
${MAKE} -C el3_payload clean
+endif
.PHONY: realclean distclean
realclean distclean:
@echo " REALCLEAN"
${Q}rm -rf ${BUILD_BASE}
${Q}rm -f ${CURDIR}/cscope.*
+ifneq ($(wildcard ${EL3_PAYLOAD_PLAT_MAKEFILE_FULL}),)
${MAKE} -C el3_payload distclean
+endif
.PHONY: checkcodebase
checkcodebase: locate-checkpatch
@@ -367,21 +435,22 @@ cactus_mm:
@echo "ERROR: $@ is supported only on AArch64 FVP."
@exit 1
-.PHONY: ivy
-ivy:
+.PHONY: realm
+realm:
@echo "ERROR: $@ is supported only on AArch64 FVP."
@exit 1
-.PHONY: quark
-quark:
- @echo "ERROR: $@ is supported only on AArch64 FVP."
- @exit 1
endif
-ifneq (${ARCH}-${PLAT},$(filter ${ARCH}-${PLAT},aarch64-fvp aarch64-tc0))
+ifneq (${ARCH}-${PLAT},$(filter ${ARCH}-${PLAT},aarch64-fvp aarch64-tc))
.PHONY: cactus
cactus:
- @echo "ERROR: $@ is supported only on AArch64 FVP or TC0."
+ @echo "ERROR: $@ is supported only on AArch64 FVP or TC."
+ @exit 1
+
+.PHONY: ivy
+ivy:
+ @echo "ERROR: $@ is supported only on AArch64 FVP or TC."
@exit 1
endif
@@ -494,22 +563,27 @@ all : $(1)
endef
+ifeq (${ARCH},aarch32)
+ ARCH_TESTS_SKIP_LIST := tftf/tests/aarch32_tests_to_skip.txt
+endif
+
$(AUTOGEN_DIR):
$(Q)mkdir -p "$@"
-$(AUTOGEN_DIR)/tests_list.c $(AUTOGEN_DIR)/tests_list.h: $(AUTOGEN_DIR) ${TESTS_FILE} ${PLAT_TESTS_SKIP_LIST}
+$(AUTOGEN_DIR)/tests_list.c $(AUTOGEN_DIR)/tests_list.h: $(AUTOGEN_DIR) ${TESTS_FILE} ${PLAT_TESTS_SKIP_LIST} $(ARCH_TESTS_SKIP_LIST)
@echo " AUTOGEN $@"
- tools/generate_test_list/generate_test_list.pl $(AUTOGEN_DIR)/tests_list.c $(AUTOGEN_DIR)/tests_list.h ${TESTS_FILE} $(PLAT_TESTS_SKIP_LIST)
+ tools/generate_test_list/generate_test_list.py $(AUTOGEN_DIR)/tests_list.c \
+ $(AUTOGEN_DIR)/tests_list.h ${TESTS_FILE} \
+ --plat-skip-file=$(PLAT_TESTS_SKIP_LIST) \
+ --arch-skip-file=$(ARCH_TESTS_SKIP_LIST)
ifeq ($(SMC_FUZZING), 1)
$(Q)mkdir -p ${BUILD_PLAT}/smcf
dtc ${SMC_FUZZ_DTS} >> ${BUILD_PLAT}/smcf/dtb
$(OC) -I binary -O elf64-littleaarch64 -B aarch64 ${BUILD_PLAT}/smcf/dtb ${BUILD_PLAT}/smcf/dtb.o \
- --redefine-sym _binary___build_fvp_debug_smcf_dtb_start=_binary___dtb_start \
- --redefine-sym _binary___build_fvp_debug_smcf_dtb_end=_binary___dtb_end
+ --redefine-sym _binary___build_$(PLAT)_$(BUILD_TYPE)_smcf_dtb_start=_binary___dtb_start \
+ --redefine-sym _binary___build_$(PLAT)_$(BUILD_TYPE)_smcf_dtb_end=_binary___dtb_end
endif
-$(eval $(call MAKE_IMG,tftf))
-
ifeq ($(FIRMWARE_UPDATE), 1)
$(eval $(call MAKE_IMG,ns_bl1u))
$(eval $(call MAKE_IMG,ns_bl2u))
@@ -519,23 +593,50 @@ ifeq (${ARCH}-${PLAT},aarch64-fvp)
$(eval $(call MAKE_IMG,cactus_mm))
$(eval $(call MAKE_IMG,cactus))
$(eval $(call MAKE_IMG,ivy))
- $(eval $(call MAKE_IMG,quark))
endif
-ifeq (${ARCH}-${PLAT},aarch64-tc0)
+.PHONY : tftf
+ $(eval $(call MAKE_IMG,tftf))
+
+# Build flag 'ENABLE_REALM_PAYLOAD_TESTS=1' builds and packs the Realm Payload Tests
+ifeq (${ENABLE_REALM_PAYLOAD_TESTS},1)
+ $(eval $(call MAKE_IMG,realm))
+
+# This forces tftf.bin to be rebuilt. For incremental builds this re-creates
+# tftf.bin and removes the old realm payload packed by the last build.
+.PHONY : $(BUILD_PLAT)/tftf.bin
+
+tftf: realm
+ @echo " PACK REALM PAYLOAD"
+ $(shell dd if=$(BUILD_PLAT)/realm.bin of=$(BUILD_PLAT)/tftf.bin obs=1 \
+ oflag=append conv=notrunc)
+endif
+
+ifeq (${ARCH}-${PLAT},aarch64-tc)
$(eval $(call MAKE_IMG,cactus))
+ $(eval $(call MAKE_IMG,ivy))
endif
+SP_LAYOUT:
+ ${Q}tools/generate_json/generate_json.sh \
+ $(BUILD_PLAT) $(SECURE_PARTITIONS)
+
# The EL3 test payload is only supported in AArch64. It has an independent build
# system.
.PHONY: el3_payload
+# TODO: The EL3 test payload is currently supported only with GCC. It has an
+# independent build system; Clang support is still to be added.
+ifneq ($(findstring gcc,$(notdir $(CC))),)
ifneq (${ARCH},aarch32)
+ifneq ($(wildcard ${EL3_PAYLOAD_PLAT_MAKEFILE_FULL}),)
el3_payload: $(BUILD_DIR)
${Q}${MAKE} -C el3_payload PLAT=${PLAT}
${Q}find "el3_payload/build/${PLAT}" -name '*.bin' -exec cp {} "${BUILD_PLAT}" \;
all: el3_payload
endif
+endif
+endif
doc:
@echo " BUILD DOCUMENTATION"
@@ -551,7 +652,7 @@ cscope:
.SILENT: help
help:
echo "usage: ${MAKE} PLAT=<${PLATFORMS}> \
-<all|tftf|ns_bl1u|ns_bl2u|cactus|ivy|quark|el3_payload|distclean|clean|checkcodebase|checkpatch|help_tests>"
+<all|tftf|ns_bl1u|ns_bl2u|cactus|ivy|el3_payload|distclean|clean|checkcodebase|checkpatch|help_tests>"
echo ""
echo "PLAT is used to specify which platform you wish to build."
echo "If no platform is specified, PLAT defaults to: ${DEFAULT_PLAT}"
@@ -562,10 +663,9 @@ help:
echo " tftf Build the TFTF image"
echo " ns_bl1u Build the NS_BL1U image"
echo " ns_bl2u Build the NS_BL2U image"
- echo " cactus Build the Cactus image (Test S-EL0 payload) and resource description."
- echo " cactus_mm Build the Cactus-MM image (Test S-EL0 payload)."
- echo " ivy Build the Ivy image (Test S-EL0 payload) and resource description."
- echo " quark Build the Quark image (Test S-EL0 payload) and resource description."
+ echo " cactus Build the Cactus image (FF-A S-EL1 test payload)."
+ echo " cactus_mm Build the Cactus-MM image (SPM-MM S-EL0 test payload)."
+ echo " ivy Build the Ivy image (FF-A S-EL0 test payload)."
echo " el3_payload Build the EL3 test payload"
echo " checkcodebase Check the coding style of the entire source tree"
echo " checkpatch Check the coding style on changes in the current"
diff --git a/branch_protection.mk b/branch_protection.mk
index c16cdad8e..86f197f95 100644
--- a/branch_protection.mk
+++ b/branch_protection.mk
@@ -10,9 +10,6 @@
# poised to handle dependencies, as all build variables would have a default
# value by then.
-# Select the branch protection features to use.
-BRANCH_PROTECTION := 0
-
# Flag to enable Branch Target Identification in the TFTF.
# Internal flag not meant for direct setting.
# Use BRANCH_PROTECTION to enable BTI.
diff --git a/docs/about/contact.rst b/docs/about/contact.rst
index a84dc5218..bb1e2a497 100644
--- a/docs/about/contact.rst
+++ b/docs/about/contact.rst
@@ -20,14 +20,14 @@ You can see a `summary of all the lists`_ on the TrustedFirmware.org website.
Issue Tracker
^^^^^^^^^^^^^
-Specific issues may be raised using the `issue tracker`_ on the
-TrustedFirmware.org website. Using this tracker makes it easy for the
-maintainers to prioritise and respond to your ticket.
+Specific issues may be raised using the `issue tracker`_ on GitHub. Using this
+tracker makes it easy for the maintainers to prioritise and respond to your
+ticket.
--------------
-*Copyright (c) 2019, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2024, Arm Limited. All rights reserved.*
-.. _`issue tracker`: https://developer.trustedfirmware.org
-.. _`TF-A-Tests development`: https://lists.trustedfirmware.org/pipermail/tf-a-tests/
-.. _`summary of all the lists`: https://lists.trustedfirmware.org
+.. _`issue tracker`: https://github.com/TrustedFirmware-A/tf-a-tests/issues
+.. _`TF-A-Tests development`: https://lists.trustedfirmware.org/mailman3/lists/tf-a-tests.lists.trustedfirmware.org/
+.. _`summary of all the lists`: https://lists.trustedfirmware.org/mailman3/lists/
diff --git a/docs/about/features.rst b/docs/about/features.rst
index dbaec1279..81212303c 100644
--- a/docs/about/features.rst
+++ b/docs/about/features.rst
@@ -19,6 +19,7 @@ not exhaustive):
- `Firmware update`_ (or recovery mode)
- `EL3 payload boot flow`_
- Secure partition support
+- `True Random Number Generator Firmware Interface (TRNG_FW)`_
These tests are not a compliance test suite for the Arm interface standards used
in TF-A (such as PSCI).
@@ -42,7 +43,7 @@ Still to come
--------------
-*Copyright (c) 2019-2020, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2023, Arm Limited. All rights reserved.*
.. _SMC Calling Convention: https://developer.arm.com/docs/den0028/latest
.. _Power State Coordination Interface (PSCI): PSCI_
@@ -55,3 +56,5 @@ Still to come
.. _TSP: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/bl32/tsp
.. _Firmware update: https://trustedfirmware-a.readthedocs.io/en/latest/components/firmware-update.html
.. _EL3 payload boot flow: https://trustedfirmware-a.readthedocs.io/en/latest/design/alt-boot-flows.html#el3-payloads-alternative-boot-flow
+.. _True Random Number Generator Firmware Interface (TRNG_FW): TRNG_FW_
+.. _TRNG_FW: https://developer.arm.com/documentation/den0098/latest
diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst
index 2d85f659d..f7cf3deb1 100644
--- a/docs/about/maintainers.rst
+++ b/docs/about/maintainers.rst
@@ -22,4 +22,26 @@ Maintainers List
--------------
+Code owners
+-----------
+
+Platform Ports
+~~~~~~~~~~~~~~
+
+Xilinx platform port
+^^^^^^^^^^^^^^^^^^^^
+:|M|: Michal Simek <michal.simek@amd.com>
+:|G|: `michalsimek`_
+:|M|: Amit Nagal <amit.nagal@amd.com>
+:|G|: `amit-nagal`_
+:|M|: Akshay Belsare <akshay.belsare@amd.com>
+:|G|: `Akshay-Belsare`_
+:|F|: plat/xilinx\*
+:|F|: tftf/tests/plat/xilinx\*
+:|F|: docs/plat/xilinx-\*
+:|F|: tftf/tests/tests-versal.mk
+:|F|: tftf/tests/tests-versal.xml
+
+--------------
+
*Copyright (c) 2018-2020, Arm Limited. All rights reserved.*
diff --git a/docs/change-log.rst b/docs/change-log.rst
index 136bbd494..aaa04bd5e 100644
--- a/docs/change-log.rst
+++ b/docs/change-log.rst
@@ -7,6 +7,571 @@ Firmware-A version for simplicity. At any point in time, TF-A Tests version
Tests are not guaranteed to be compatible. This also means that a version
upgrade on the TF-A-Tests side might not necessarily introduce any new feature.
+Version 2.10
+------------
+
+New features
+^^^^^^^^^^^^
+
+- More tests are made available in this release to help validate the
+ functionalities in the following areas:
+
+ - FF-A
+ - Realm Management Extension
+ - EL3 Runtime
+ - New Platform ports
+
+TFTF
+~~~~
+
+- FF-A testing:
+
+ - Fixing FF-A version tests and expected error codes.
+ - Remove SPM tests from AArch32 builds.
+ - Support extended set of registers with FF-A calls.
+ - Fix use of instruction permissions in FF-A memory sharing tests.
+ - Extend memory sharing tests that use the clear memory flags.
+ - Test that memory from Root World/Realm can't be shared.
+ - Test the compliance to SMCCC at the non-secure physical instance.
+ - Exercise secure eSPI interrupt handling.
+
+- New tests:
+
+ - Added test for Errata management firmware interface.
+ - Added basic firmware handoff tests.
+ - Test to verify SErrors are synchronized at the EL3 boundary.
+ - Introduced RAS KFH support test.
+ - Modified FEAT_FGT test to check for init values.
+ - Updated test_psci_stat.c to support more power states.
+
+- Platforms:
+
+ - TC:
+
+ - Made TC0 TFTF code generic to TC.
+
+ - Versal:
+
+ - Added platform support and platform specific cases.
+ - Added Versal documentation.
+
+ - Versal NET:
+
+ - Added platform support and platform specific cases.
+ - Added Versal NET documentation.
+
+ - Xilinx:
+ - Reorganized timer code into common path.
+
+- Miscellaneous:
+
+ - Added helper routines to read, write and compare SVE and FPU registers.
+ - New CPU feature detection helpers.
+ - Introduced clang toolchain support and added python generate_test_list
+ script.
+ - Docs: Updated toolchain requirements and added maintainers for AMD-Xilinx.
+ - Tidy setup and discovery logs.
+ - Added a note to the docs on building TF-A-Tests using clang.
+ - Added SME helper routines and added Streaming SVE support.
+ - Introduced SError exception handler.
+ - Updated toolchain requirements documentation.
+ - Check for support for ESPI before testing it.
+
+Realm Management Extension (RME)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Added SVE Realm tests and tests for EAC1.
+ - Test to intermittently switch to Realm while doing NS SVE and Streaming
+ SVE ops.
+ - Added tests to check NS SME ID registers and configurations.
+ - Added test to check that RMM does not leak Realm contents in SVE registers.
+ - Test to check that a non-SVE Realm gets an undefined abort.
+ - Test to check that various SIMD states are preserved across NS/RL switches.
+ - Added test to check switching of SME registers to SIMD state.
+ - Testcase for CPU_ON denied.
+ - Test for multiple REC single CPU.
+ - Test for PAuth in Realm.
+ - Enhanced FPU state verification test.
+ - Modified API of RMI_RTT_*_RIPAS, changed handling.
+ - Removed RIPAS_UNDEFINED and modified RIPAS/HIPAS definitions for EAC2.
+ - Removed RMI_VALID_NS status and RMI_ERROR_IN_USE error code
+ RMI_RTT_UNMAP_UNPROTECTED, and updated the API of data/rtt functions.
+ - Updated RSI_VERSION, RMI_VERSION and modified rmi_realm_params structure.
+ - Added support for PMU as per RMM Specification 1.0-eac2.
+ - Added PSCI API to Realms and API for REC force exit.
+ - Added support for multiple REC and CPU and data buffer to pass arg to REC.
+ - Set size of RsiHostCall.gprs[] to 31.
+ - Passing RD pointer in arg0 register X1.
+ - Added host call to flush Realm prints.
+ - Aligned Realm stack.
+ - Introduced a new build flag for the RME stack and appended realm.bin at the
+ end of tftf.bin.
+
+Cactus (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Test discovery of EL3 SPMD managed SPs.
+ - Configure partitions load-address from SP layout file.
+ - Use the non-secure memory attribute in the descriptor obtained from
+ FFA_MEM_RETRIEVE_RESP.
+ - SPs configured with a unique boot-order field in their respective
+ manifests.
+ - Test for the FFA_PARTITION_INFO_GET_REGS interface.
+ - Defined memory security state attribute for the memory transaction descriptor.
+
+Issues resolved since last release
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ - Fixed incremental build issue with Realm payload and build dependency
+ in test-realms-payload.
+ - SME: use of rdsvl instead of rdvl, enable SME/SME2 during arch init,
+ align test vector arrays to 16 bytes.
+ - SVE: representing Z0-Z31 as array of bytes and moved operation to a lib
+ routine.
+ - Fixed issue in processing dynamic relocations for AArch64.
+ - Reclaim and check for shared memory now supported.
+ - FPU replaced read with write of random value to fpsr/fpcr.
+ - Disabled RMI tests when building for AArch32 architecture.
+ - Fixed command id passed to Realm to compare FPU registers.
+ - Fixed broken links in docs landing page and made generate_test_list
+ backward compatible.
+ - XLAT: added support for 52 bit PA size with 4KB granularity.
+ - Fixed stress test for XLAT v2.
+ - RAS: Moved wait logic from assembly to C and renamed SDEI related
+ functions/events.
+
+Version 2.9
+-----------
+
+New features
+^^^^^^^^^^^^
+
+- More tests are made available in this release to help validate the
+ functionalities in the following areas:
+
+ - FF-A Features
+ - Realm Management Extension
+ - New Architecture Specific features related to v8.8
+ - New platform ports
+
+TFTF
+~~~~
+
+- FF-A testing:
+
+ - Reordered logs in the memory sharing tests.
+ - Memory share bumped to v1.1 EAC0.
+ - Updated tests for FFA_FEATURES(FFA_MEM_RETRIEVE_REQ).
+ - Fixed issues with RXTX buffer unmapping and dependencies on tests.
+ - Added check for execution state property of partitions.
+
+- New tests:
+
+ - Tests for Errata management firmware interface.
+ - Ensure FPU state registers context is preserved in RL/SE/NS.
+ - Modified FEAT_HCX test to also check for HCRX_EL2 init value.
+ - Added basic SME2 tests.
+ - PSCI tests for OS-initiated mode.
+ - Added "nop" test to be used in conjunction with TFX.
+ - Introduced capability to generate Sync External Aborts (EA) in TFTF.
+ - New test to generate an SError.
+ - Tests to check whether the PMU is functional and if the state is
+ preserved when switching worlds. PMEVTYPER.evtCount width extended.
+ - Added support for more SPE versions.
+
+- Platforms:
+
+ - RD-N2-Cfg3:
+
+ - Added TFTF support.
+
+- Miscellaneous:
+
+ - SIMD/FPU save/restore routine moved to common lib.
+ - Updated toolchain requirements documentation.
+ - Update SME/Mortlach tests.
+ - Unified Firmware First handling of lower EL EA.
+ - Moved trusted wdog API to spm_common.
+ - Added the ability to skip tests for AArch32.
+ - Added config file to allow doc defaults be changed.
+ - Modified tests for FEAT_RNG_TRAP.
+ - Moved 'Stress test timer framework' to a new test suite
+ 'tests-timer-stress'.
+ - Support for new binutils versions.
+ - Removed deprecated SPM libs and test code.
+
+
+Realm Management Extension (RME)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Added helper macro for RME tests.
+ - Test Secure interrupt can preempt Realm EL1.
+ - Added PMU Realm tests.
+ - Added BP_OPTION to REALM_CFLAGS to allow building the realm payload with
+ BTI/PAuth support.
+ - Fixed build issues introduced by the TFTF Realm extension
+ enhancement tests.
+ - Test case return codes updated according to RMM Bet0 specification.
+ - Fixed build problem related to rmi_rec_enter verbose log.
+ - Added randomization of SMC RMI commands parameters and checking of
+ X4-X7 return values as per SMCCC v1.2.
+
+Cactus (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Use of FFA_CONSOLE_LOG for debug logs.
+ - Test for consecutive same memory donation to other partitions.
+ - Now validating NWd can't share forbidden addresses.
+ - Support for registering irq handlers.
+ - Fixed attributes for NS memory region.
+ - Removal of memory regions not page-aligned.
+ - Added check for core linear id matching id passed by SPMC.
+
+Issues resolved since last release
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ - Build issue for older toolchains and other failures resolved.
+ - Dropped invalid access test from CI.
+ - Now checking that the PMU is supported before using any of it.
+ - Use of a write instead of a read to generate an SError, to avoid sync
+ exceptions.
+ - Fixed broken link to TRNG_FW documentation.
+ - INIT_TFTF_MAILBOX() is called first for the invalid access test.
+
+Version 2.8
+-----------
+
+New features
+^^^^^^^^^^^^
+- More tests are made available in this release to help validate the
+ functionalities in the following areas:
+
+ - FF-A Features
+ - Realm Management Extension
+ - New Architecture Specific features related to v8.8
+ - New platform ports
+
+TFTF
+~~~~
+
+- FF-A testing:
+
+ - UUID included in partition information descriptors.
+ - Checks for size of partition information descriptors.
+ - Renamed FFA_MSG_RUN ABI function to FFA_RUN and allowed it to return from
+ Waiting state.
+ - Made ffa_tests available for Ivy.
+ - Updated verbose message log structure.
+ - Prevented generate_json.sh from being called more than once by requiring
+ a list of partitions to be supplied.
+ - Added a temporary workaround for unexpected affinity info state to prevent
+ a system panic.
+ - Added test to exercise FFA_CONSOLE_LOG ABI.
+
+ - FF-A v1.1 Secure interrupts
+
+ - Added managed exit to first and second SP in call chain.
+ - Added test to exercise managed exit by two SPs in a call chain.
+ - Added tests to exercise NS interrupt being queued and signaled to SP.
+
+- New tests:
+
+ - Tests for SVE operations in Normal World and discover SVE vector length.
+ - Added cleanup TRNG service tests.
+ - Added test for SMCCC_ARCH_WORKAROUND_3.
+ - Updated PAuth helpers to support QARMA3 algorithm.
+ - Added tests for RNG_TRAP.
+
+- Platforms:
+
+ - SGI:
+
+ - Introduced platform variant build option.
+ - Re-organized header files.
+ - Migrated to secure uart port for routing tftf logs.
+
+ - N1SDP:
+
+ - Added TFTF support for N1SDP.
+
+ - RD-N2:
+
+ - Added TFTF support for RD-N2.
+
+ - RD-N2-Cfg1:
+
+ - Added TFTF support for RD-N2-Cfg1.
+
+ - RD-V1:
+
+ - Added TFTF support for RD-V1.
+
+- Miscellaneous:
+
+ - Added a missing ISB instruction in SME test.
+ - Refactor to make some helper functions re-usable.
+ - Updated build command to clean EL3 payload image.
+ - Moved renaming of the primary dts file for Ivy partitions.
+ - Added check that verifies if a platform supports el3_payload before
+ building it.
+ - Updated memory share test to meet Hafnium specification.
+ - Updated toolchain requirements documentation.
+
+
+Realm Management Extension (RME)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Added Realm payload management capabilities to TFTF to act as a NS Host.
+ - Added test to verify that RMM and SPM can co-exist and work properly.
+ - Added function to reset delegated buffers to non-delegated state.
+ - Re-used existing wait_for_non_lead_cpus() function helper.
+ - Refactored RMI FID macros to simplify usage.
+ - Added userguide for realm payload testing.
+
+Cactus (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Corrected some tests message types from ERROR to VERBOSE.
+ - Increased the number of xlat tables in Cactus to allow the use of a 48-bit
+ PA size for memory sharing between SPs.
+ - Introduced a new direct request message command to resume after managed
+ exit.
+ - Skip enabling virtual maintenance interrupts explicitly.
+ - Allowed sender to resume interrupted target vCPU.
+ - Added support for handling managed exit through vIRQ.
+ - Added support for discovering interrupt IDs of managed exit signals.
+ - Specified action in response to NS interrupt in manifest.
+
+Ivy (Secure-EL0 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Allowed testing using VHE.
+ - Allowed Ivy partitions to use ffa_helpers functions.
+ - Requirement of common name for Ivy partitions for consistency.
+ - Specified action in response to NS interrupt in manifest.
+
+Issues resolved since last release
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ - Fixed SME header guard name.
+ - Fixed response for incorrect direct message request for FF-A.
+
+Version 2.7
+-----------
+
+New features
+^^^^^^^^^^^^
+- More tests are made available in this release to help validate the
+ functionalities in the following areas:
+
+ - FF-A Features
+ - New Architecture Specific features related to v8.7
+ - New platform port
+
+TFTF
+~~~~
+
+- FF-A testing:
+
+ - FF-A partition information structure is updated to include UUIDs.
+ - Memory Management helper functions are refactored to fetch the details
+ of smc call failures in tftf and cactus.
+ - Added test to validate memory sharing operations from SP to NS-endpoint
+ are denied by SPMC.
+ - Added test to ensure an endpoint that sets its version to v1.0 receives
+ v1.0 partition information descriptors as defined in v1.0 FF-A
+ specification.
+ - Added test to validate that memory is cleared on memory sharing operations
+ between normal world and secure world.
+
+ - FF-A v1.1 Secure interrupts
+
+ - Added support to enhance the secure interrupt handling test.
+ - Support for registering and unregistering custom handler that is
+ invoked by SP at the tail end of the virtual interrupt processing.
+ - Added support for querying the ID of the last serviced virtual interrupt.
+
+- New tests:
+
+ - Added test to validate that realm region access is being prevented from
+ normal world.
+ - Added test to validate that secure region access is being prevented from
+ normal world.
+ - Added test to validate that secure region access is being prevented from
+ realm world.
+ - Added test to validate that root region access is being prevented from
+ realm world.
+ - Added a test for v8.7 Advanced floating-point behavior (FEAT_AFP).
+  - Added an SPE test that reads the static profiling system registers
+    of the available SPE version, i.e. FEAT_SPE/FEAT_SPEv1p1/FEAT_SPEv1p2.
+ - Added a test to validate functionality of WFET and WFIT instructions
+ introduced by v8.7 FEAT_WFxT.
+ - Added basic SME tests to ensure feature enablement by EL3 is proper for
+ its usage at lower non-secure ELs.
+ - Added test to check Data Independent timing (DIT) field of PSTATE is
+ retained on exception.
+ - Added test to ensure that EL3 has properly enabled access to FEAT_BRBE
+ from non-secure ELs.
+
+- Platforms:
+
+  - Added initial platform support for corstone1000.
+
+ - TC:
+
+ - Support for notification in tertiary SP manifest.
+
+ - FVP:
+
+ - Support to provide test memory addresses to validate the invalid
+      memory access test from tftf (NS-EL2).
+
+- Miscellaneous:
+
+ - Added support to configure the physical/virtual address space for FVP.
+  - Added a common header file defining size macros to support all the
+    platforms.
+ - Introduced handler for synchronous exceptions (AArch64).
+ - Added macros to extract the ISS portion of an ELx ESR exception syndrome
+ register.
+ - Support to dynamically map/unmap test region to validate invalid memory
+ access tests.
+ - Added support to receive boot information through secure partitions,
+ according to the FF-A v1.1 EAC0 specification.
+  - Added a helper API function to the SPM test suite to initialize the FF-A
+    mailbox and enable FF-A based messaging with an SP.
+ - Updated the build string to display the rc-tagged version.
+
+Cactus (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  - Added test for nonsecure memory sharing between Secure Partitions (SPs).
+ - Added test to validate that a realm region cannot be accessed from secure
+ world.
+ - Added test to permit checking a root region cannot be accessed from secure
+ world.
+ - Extended the test command CACTUS_MEM_SEND_CMD to add support for memory
+ sharing flags.
+ - Added support to save the state of general purpose registers x0-x4 at the
+ entry to cold boot and restore them before jumping to entrypoint of cactus.
+
+Issues resolved since last release
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ - Fixed a bug to align RMI FIDs with SMCCC.
+ - Fixed encoding of vCPU and receiver IDs in the FFA_NOTIFICATION_GET
+ interface to comply with the FF-A v1.1 beta0 specification.
+ - Fixed memory retrieve request attributes by enforcing them to be inner
+ shareable rather than outer.
+ - Fixed static memory mapping of EL3 in EL2.
+ - Fixed a spurious error log message with memory share test.
+  - Aligned RMI FIDs with SMCCC.
+ - Fixed PSCI system suspend test suite execution in a four world system.
+ - Configured the build system to use DWARF 4 standard for debug builds with
+ ArmDS.
+ - Introduced macro IRQ_TWDOG_INTID for the Tegra210, Tegra186 and Tegra194
+ platforms to fix the compilation failures.
+
+Version 2.6
+-----------
+
+New features
+^^^^^^^^^^^^
+- More tests are made available in this release to help validate the
+ functionalities in the following areas:
+
+  - Firmware Framework for Arm A-profile (FF-A)
+  - Realm Management Extension (RME)
+ - Embedded Trace Extension and Trace Buffer Extension (ETE and TRBE)
+
+TFTF
+~~~~
+
+- FF-A testing:
+
+  - Updated FF-A version to v1.1.
+ - Added helpers for SPM tests to check partition info of SPs from normal
+ world.
+ - Added tests to check for ffa_features supported.
+ - Added test for FFA_RXTX_UNMAP ABI.
+ - Added test for FFA_SPM_ID_GET.
+ - FF-A v1.1 Notifications
+
+ - Added test for notifications bitmap create and destroy ABIs.
+ - Added test for notifications set and get ABIs.
+ - Added test for notification INFO_GET ABI.
+ - Added test to check notifications pending interrupt is injected into
+      and handled by the expected vCPU in an MP setup.
+ - Added test for signaling from MP SP to UP SP.
+ - Added test to check notifications interrupt IDs retrieved with
+ FFA_FEATURES ABI.
+ - Added test to check functionality of notifications scheduled receiver
+ interrupt.
+
+ - FF-A v1.1 Secure interrupts
+
+ - Added support for handling secure interrupts in Cactus SP.
+ - Added several tests to exercise secure interrupt handling while SP
+ is in WAITING/RUNNING/BLOCKED state.
+
+- New tests:
+
+  - Enabled SVE tests.
+ - Added test for trace system registers access.
+ - Added test for trace filter control registers access.
+ - Added test for trace buffer control registers access.
+ - Added test to check PSTATE in SDEI handler.
+ - Added test to check if HCRX_EL2 is accessible.
+
+- Platforms:
+
+ - TC0:
+
+ - Support for direct messaging with managed exit.
+ - Support for building S-EL0 Ivy partition.
+
+ - FVP:
+
+ - Update Cactus secure partitions to indicate Managed exit support.
+
+- Miscellaneous:
+
+ - Added random seed generation capability and ability to specify build
+ parameters for SMC Fuzzer tool.
+
+Cactus (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Added helper for Cactus SP sleep.
+ - Added test commands to request use of notifications interfaces.
+ - Added several commands that generate direct message requests to assist in
+    testing secure interrupt handling and notifications features in FF-A v1.1.
+ - Added support for SP805 Trusted Watchdog module.
+
+Ivy (Secure-EL1 test partition)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  - Added shim layer to Ivy partition and enabled PIE.
+  - Defined Ivy partition manifest and used FF-A for message handling.
+  - Prepared S-EL1/0 environment for enabling the S-EL0 application.
+
+Realm Management Extension (RME)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ - Added tests to run RMI and SPM on multiple CPUs concurrently.
+ - Added tests for multi CPU delegation and fail conditions.
+ - Added tests to query RMI version on multiple CPUs.
+
+Issues resolved since last release
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ - Fixed Ivy partition start address for TC0.
+ - Fixed SP manifests to use little endian format UUID.
+ - Fixed a bug in memory sharing test for Cactus SP.
+ - Invalidate data cache for NS_BL1U and NS_BL2U images.
+ - Fixed attributes to Read-Write only for memory regions described in partition
+ manifests.
+
Version 2.5
-----------
@@ -14,6 +579,7 @@ New features
^^^^^^^^^^^^
- More tests are made available in this release to help validate the
functionalities in the following areas:
+
- True Random Number Generator (TRNG) test scenarios.
- Multicore / Power State Controller Interface (PSCI) tests.
- v8.6 Activity Monitors Unit (AMU) enhancements test scenarios.
@@ -1127,7 +1693,7 @@ All test images
--------------
-*Copyright (c) 2018-2020, Arm Limited. All rights reserved.*
+*Copyright (c) 2018-2022, Arm Limited. All rights reserved.*
.. _Arm Neoverse Reference Design N1 Edge (RD-N1-Edge): https://developer.arm.com/products/system-design/reference-design/neoverse-reference-design
.. _Arm SGI-575: https://developer.arm.com/products/system-design/fixed-virtual-platforms
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index d8ffe8b14..699b79de4 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -107,6 +107,9 @@ payload, whose simplistic build system is mostly independent.
- ``V``: Verbose build. If assigned anything other than 0, the build commands
are printed. Default is 0.
+- ``ENABLE_REALM_PAYLOAD_TESTS``: This option builds and packs the Realm payload
+  tests for an RME-enabled stack. Default is 0.
+
Arm FVP Platform Specific Build Options
---------------------------------------
@@ -146,6 +149,12 @@ TFTF-specific Build Options
(RAM) or 1 (non-volatile memory like flash) as test results storage. Default
value is 0, as writing to the flash significantly slows tests down.
+Realm payload specific Build Options
+------------------------------------
+
+- ``ENABLE_REALM_PAYLOAD_TESTS=1``: This option builds the Realm payload tests
+  image ``realm.bin`` and packs it into ``tftf.bin``.
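+
+  For example, assuming the Arm GNU toolchain is on the ``PATH`` and using FVP
+  as an illustrative platform, TFTF can be built with the Realm payload tests
+  included as follows:
+
+  ::
+
+    make CROSS_COMPILE=aarch64-none-elf- PLAT=fvp ENABLE_REALM_PAYLOAD_TESTS=1 tftf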
+
FWU-specific Build Options
--------------------------
diff --git a/docs/getting_started/build.rst b/docs/getting_started/build.rst
index d0147f9c0..111306d9b 100644
--- a/docs/getting_started/build.rst
+++ b/docs/getting_started/build.rst
@@ -16,6 +16,25 @@ Building TF-A Tests
export CROSS_COMPILE=<path-to-aarch32-gcc>/bin/arm-eabi-
+- It is possible to build TF-A Tests using clang (currently AArch64 only). To
+ do so ``CC`` needs to point to the clang binary. Only the compiler is switched;
+ the assembler and linker need to be provided by the GNU toolchain, thus
+ ``CROSS_COMPILE`` should be set as described above.
+
+ clang will be selected when the base name of the path assigned to ``CC``
+ contains the string 'clang'.
+
+- For AArch64 using clang:
+
+ .. code:: shell
+
+ export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf-
+ make CC=<path-to-clang>/bin/clang PLAT=<platform> tftf
+
+- Currently, the following TF-A Tests targets are supported for clang build:
+
+ ``tftf, ivy, realm, cactus, cactus_mm, ns_bl1u``
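+
+  For example, a Cactus-only clang build (the platform and the toolchain paths
+  are illustrative) follows the same pattern as the TFTF example above:
+
+  .. code:: shell
+
+    export CROSS_COMPILE=<path-to-aarch64-gcc>/bin/aarch64-none-elf-
+    make CC=<path-to-clang>/bin/clang PLAT=fvp cactus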
+
- Change to the root directory of the TF-A Tests source tree and build.
For AArch64:
@@ -51,7 +70,6 @@ Building TF-A Tests
- ``build/<platform>/<build-type>/cactus_mm.bin``
- ``build/<platform>/<build-type>/cactus.bin``
- ``build/<platform>/<build-type>/ivy.bin``
- - ``build/<platform>/<build-type>/quark.bin``
where ``<platform>`` is the name of the chosen platform and ``<build-type>``
is either ``debug`` or ``release``. The actual number of images might differ
@@ -103,6 +121,22 @@ command from the TF-A root directory:
Please refer to the `TF-A documentation`_ for further details.
+Realm payload test image
+````````````````````````
+
+``realm.bin`` is the realm payload test image and is packaged along with
+tftf for Realm Management Extension (RME) testing. This can be built using
+the following command:
+
+::
+
+ make PLAT=<platform> ENABLE_REALM_PAYLOAD_TESTS=1 tftf
+
+The generated ``realm.bin`` is packaged as part of ``tftf.bin``
+to be used as a single BL33 image.
+
+Please refer to the `TF-A RME documentation`_ for build and run instructions.
+
NS_BL1U and NS_BL2U test images
```````````````````````````````
@@ -311,6 +345,7 @@ To build TFTF with SPM tests, Cactus and Ivy use:
.. _Arm Management Mode Interface: https://developer.arm.com/documentation/den0060/a/
.. _Arm Firmware Framework for Armv8-A: https://developer.arm.com/docs/den0077/latest
.. _TF-A documentation: https://trustedfirmware-a.readthedocs.org
+.. _TF-A RME documentation: https://trustedfirmware-a.readthedocs.io/en/latest/components/realm-management-extension.html
.. _TF-A Secure Partition Manager (FF-A): https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager.html
.. _TF-A Secure Partition Manager (MM): https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager-mm.html
.. _Building TF-A Secure Partition Manager (MM): https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager-mm.html#building-tf-a-with-secure-partition-support
diff --git a/docs/getting_started/requirements.rst b/docs/getting_started/requirements.rst
index f9417c756..74b3e3d93 100644
--- a/docs/getting_started/requirements.rst
+++ b/docs/getting_started/requirements.rst
@@ -12,7 +12,7 @@ Build Host
----------
TF-A Tests may be built using a Linux build host machine with a recent Linux
-distribution. We have performed tests using Ubuntu 16.04 LTS (64-bit), but other
+distribution. We have performed tests using Ubuntu 22.04 LTS (64-bit), but other
distributions should also work fine, provided that the tools and libraries
can be installed.
@@ -23,20 +23,22 @@ Install the required packages to build TF-A Tests with the following command:
::
- sudo apt-get install device-tree-compiler build-essential git perl libxml-libxml-perl
+ sudo apt-get install device-tree-compiler build-essential git python3
-Download and install the GNU cross-toolchain from Linaro. The TF-A Tests have
-been tested with version 9.2-2019.12 (gcc 9.2):
+Note that at least Python 3.8 is required.
+
+Download and install the GNU cross-toolchain from Arm. The TF-A Tests have
+been tested with version 12.3.Rel1 (gcc 12.3):
- `GCC cross-toolchain`_
In addition, the following optional packages and tools may be needed:
-- For debugging, Arm `Development Studio 5 (DS-5)`_.
+- For debugging, Arm `Development Studio (Arm-DS)`_.
-.. _GCC cross-toolchain: https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads
-.. _Development Studio 5 (DS-5): https://developer.arm.com/products/software-development-tools/ds-5-development-studio
+.. _GCC cross-toolchain: https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/downloads
+.. _Development Studio (Arm-DS): https://developer.arm.com/Tools%20and%20Software/Arm%20Development%20Studio
--------------
-*Copyright (c) 2019, Arm Limited. All rights reserved.*
+*Copyright (c) 2019-2022, Arm Limited. All rights reserved.*
diff --git a/docs/index.rst b/docs/index.rst
index 4869af161..7e54db210 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -12,6 +12,7 @@ Trusted Firmware-A Tests Documentation
design
implementing-tests
porting/index
+ plat/index
change-log
license
diff --git a/docs/plat/index.rst b/docs/plat/index.rst
new file mode 100644
index 000000000..33c0ab983
--- /dev/null
+++ b/docs/plat/index.rst
@@ -0,0 +1,18 @@
+Platform Ports
+==============
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Contents
+ :hidden:
+
+ xilinx-versal_net
+ xilinx-versal
+ xilinx-zynqmp
+
+This section provides a list of supported upstream *platform ports* and the
+documentation associated with them.
+
+--------------
+
+*Copyright (c) 2024, Arm Limited. All rights reserved.*
diff --git a/docs/plat/xilinx-versal.rst b/docs/plat/xilinx-versal.rst
new file mode 100644
index 000000000..1fff22fea
--- /dev/null
+++ b/docs/plat/xilinx-versal.rst
@@ -0,0 +1,53 @@
+..
+ Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved. !
+
+ SPDX-License-Identifier: BSD-3-Clause !
+
+
+Xilinx Versal
+=============
+
+- The TF-A Tests on the Xilinx Versal platform run from DDR.
+- Logs are available only on the console and are not saved in memory (no NVM support).
+- The Versal platform uses the TTC timer.
+
+
+Build Command
+-------------
+For individual tests/test suite:
+
+.. code-block:: shell
+
+ make CROSS_COMPILE=aarch64-none-elf- PLAT=versal TESTS=<required tests> tftf
+
+For Versal specific tests (includes AMD-Xilinx test cases + Standard Test Suite):
+
+.. code-block:: shell
+
+ make CROSS_COMPILE=aarch64-none-elf- PLAT=versal TESTS=versal tftf
+
+Execution on Target
+-------------------
+
+- The TF-A Tests use the memory location of U-Boot.
+- To package tftf.elf in BOOT.BIN, the U-Boot entry in bootgen.bif needs to be replaced with the following:
+
+.. code-block:: shell
+
+ the_ROM_image:
+ {
+ image {
+ { type=bootimage, file=project-spec/hw-description/vpl_gen_fixed.pdi }
+ { type=bootloader, file=plm.elf }
+ { core=psm, file=psmfw.elf }
+ }
+ image {
+ id = 0x1c000000, name=apu_subsystem
+ { type=raw, load=0x00001000, file=system-default.dtb }
+ { core=a72-0, exception_level=el-3, trustzone, file=bl31.elf }
+ { core=a72-0, file=tftf.elf }
+ }
+ }
+
+- The resulting BOOT.BIN containing the TF-A Tests can now be run on the target.
+- The TF-A Tests will be executed after TF-A and the test report will be available on the console.
diff --git a/docs/plat/xilinx-versal_net.rst b/docs/plat/xilinx-versal_net.rst
new file mode 100644
index 000000000..1886489e3
--- /dev/null
+++ b/docs/plat/xilinx-versal_net.rst
@@ -0,0 +1,46 @@
+..
+ Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved. !
+ SPDX-License-Identifier: BSD-3-Clause !
+
+Xilinx Versal NET
+=================
+
+- The TF-A Tests on the Xilinx Versal NET platform run from DDR.
+- Logs are available only on the console and are not saved in memory (no NVM support).
+- The Versal NET platform uses the TTC timer.
+
+Build Command
+-------------
+For individual tests/test suite:
+
+.. code-block:: shell
+
+   make CROSS_COMPILE=aarch64-none-elf- PLAT=versal_net TESTS=<required tests> tftf
+
+For Versal NET specific tests (includes AMD-Xilinx test cases + Standard Test Suite):
+
+.. code-block:: shell
+
+   make CROSS_COMPILE=aarch64-none-elf- PLAT=versal_net TESTS=versal tftf
+
+Execution on Target
+-------------------
+
+- The TF-A Tests use the memory location of U-Boot.
+- To package tftf.elf in BOOT.BIN, the U-Boot entry in bootgen.bif needs to be replaced with the following:
+
+.. code-block:: shell
+
+ the_ROM_image:
+ {
+ image {
+ { type=bootimage, file=project-spec/hw-description/system.pdi }
+ { type=bootloader, file=plm.elf }
+ { core=psm, file=psmfw.elf }
+ }
+ image {
+ id = 0x1c000000, name=apu_subsystem
+ { type=raw, load=0x00001000, file=system-default.dtb }
+ { core=a78-0, exception_level=el-3, trustzone, file=bl31.elf }
+ { core=a78-0, exception_level=el-1, file=tftf.elf }
+ }
+ }
+
+- The resulting BOOT.BIN containing the TF-A Tests can now be run on the target.
+- The TF-A Tests will be executed after TF-A and the test report will be available on the console.
diff --git a/docs/plat/xilinx-zynqmp.rst b/docs/plat/xilinx-zynqmp.rst
new file mode 100644
index 000000000..bac6728df
--- /dev/null
+++ b/docs/plat/xilinx-zynqmp.rst
@@ -0,0 +1,45 @@
+..
+ Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved. !
+ SPDX-License-Identifier: BSD-3-Clause !
+
+Xilinx ZynqMP
+=============
+
+- The TF-A Tests on the Xilinx ZynqMP platform run from DDR.
+- Logs are available only on the console and are not saved in memory (no NVM support).
+- The ZynqMP platform uses the TTC timer.
+
+Build Command
+-------------
+For individual tests/test suite:
+
+.. code-block:: shell
+
+ make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp TESTS=<required tests> tftf
+
+For Versal specific tests (includes AMD-Xilinx test cases + Standard Test Suite):
+
+.. code-block:: shell
+
+ make CROSS_COMPILE=aarch64-none-elf- PLAT=zynqmp TESTS=versal tftf
+
+Execution on Target
+-------------------
+
+- The TF-A Tests use the memory location of U-Boot.
+- To package tftf.elf in BOOT.BIN, the U-Boot entry in bootgen.bif needs to be replaced with the following:
+
+.. code-block:: shell
+
+ the_ROM_image:
+ {
+ [bootloader, destination_cpu=a53-0] zynqmp_fsbl.elf
+ [pmufw_image] pmufw.elf
+ [destination_device=pl] pre-built/linux/implementation/download.bit
+ [destination_cpu=a53-0, exception_level=el-3, trustzone] bl31.elf
+ [destination_cpu=a53-0, load=0x00100000] system.dtb
+ [destination_cpu=a53-0, exception_level=el-2] tftf.elf
+ }
+
+- The resulting BOOT.BIN containing the TF-A Tests can now be run on the target.
+- The TF-A Tests will be executed after TF-A and the test report will be available on the console.
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 358ed0e3c..4a2270179 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,3 @@
-sphinx>=2.0.0
+sphinx >=6.2.0, <7.0.0
sphinx-rtd-theme>=0.4.3
sphinxcontrib-plantuml>=0.15
diff --git a/drivers/arm/gic/arm_gic_v2.c b/drivers/arm/gic/arm_gic_v2.c
index 826662632..a781703a4 100644
--- a/drivers/arm/gic/arm_gic_v2.c
+++ b/drivers/arm/gic/arm_gic_v2.c
@@ -9,6 +9,7 @@
#include <assert.h>
#include <debug.h>
#include <drivers/arm/gic_v2.h>
+#include <stdbool.h>
void arm_gic_enable_interrupts_local(void)
{
@@ -119,3 +120,8 @@ void arm_gic_init(uintptr_t gicc_base,
INFO("ARM GIC v2 driver initialized\n");
}
+bool arm_gic_is_espi_supported(void)
+{
+ /* ESPI not supported by GICv2. */
+ return false;
+}
diff --git a/drivers/arm/gic/arm_gic_v2v3.c b/drivers/arm/gic/arm_gic_v2v3.c
index a3f84d078..9c7692e09 100644
--- a/drivers/arm/gic/arm_gic_v2v3.c
+++ b/drivers/arm/gic/arm_gic_v2v3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,6 +11,7 @@
#include <drivers/arm/gic_common.h>
#include <drivers/arm/gic_v2.h>
#include <drivers/arm/gic_v3.h>
+#include <stdbool.h>
/* Record whether a GICv3 was detected on the system */
static unsigned int gicv3_detected;
@@ -194,3 +195,19 @@ void arm_gic_init(uintptr_t gicc_base,
INFO("%s mode detected\n", (gicv3_detected) ?
"GICv3" : "GICv2");
}
+
+bool arm_gic_is_espi_supported(void)
+{
+	unsigned int typer_reg;
+
+	if (!gicv3_detected) {
+		return false;
+	}
+
+	/* Only read GICD_TYPER once a GICv3 distributor has been detected. */
+	typer_reg = gicv3_get_gicd_typer();
+
+ /* Check if extended SPI range is implemented. */
+ if ((typer_reg & TYPER_ESPI) != 0U) {
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/arm/gic/gic_v3.c b/drivers/arm/gic/gic_v3.c
index 56049e313..93c707a5d 100644
--- a/drivers/arm/gic/gic_v3.c
+++ b/drivers/arm/gic/gic_v3.c
@@ -505,3 +505,8 @@ void gicv3_init(uintptr_t gicr_base, uintptr_t gicd_base)
gicr_base_addr = gicr_base;
gicd_base_addr = gicd_base;
}
+
+unsigned int gicv3_get_gicd_typer(void)
+{
+ return gicd_read_typer(gicd_base_addr);
+}
diff --git a/drivers/arm/sp805/sp805.c b/drivers/arm/sp805/sp805.c
index 2318c40d8..85da43ae8 100644
--- a/drivers/arm/sp805/sp805.c
+++ b/drivers/arm/sp805/sp805.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -109,45 +109,75 @@ static inline uint32_t sp805_read_wdog_pcell_id(unsigned long base, unsigned int
return mmio_read_32(base + SP805_WDOG_PCELL_ID_OFF + (id << 2));
}
-void sp805_wdog_start(uint32_t wdog_cycles)
+static void sp805_wdog_start_(unsigned long base, uint32_t wdog_cycles)
{
/* Unlock to access the watchdog registers */
- sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+ sp805_write_wdog_lock(base, SP805_WDOG_UNLOCK_ACCESS);
/* Write the number of cycles needed */
- sp805_write_wdog_load(SP805_WDOG_BASE, wdog_cycles);
+ sp805_write_wdog_load(base, wdog_cycles);
/* Enable reset interrupt and watchdog interrupt on expiry */
- sp805_write_wdog_ctrl(SP805_WDOG_BASE,
+ sp805_write_wdog_ctrl(base,
SP805_WDOG_CTRL_RESEN | SP805_WDOG_CTRL_INTEN);
/* Lock registers so that they can't be accidently overwritten */
- sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+ sp805_write_wdog_lock(base, 0x0);
}
-void sp805_wdog_stop(void)
+static void sp805_wdog_stop_(unsigned long base)
{
/* Unlock to access the watchdog registers */
- sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+ sp805_write_wdog_lock(base, SP805_WDOG_UNLOCK_ACCESS);
/* Clearing INTEN bit stops the counter */
- sp805_write_wdog_ctrl(SP805_WDOG_BASE, 0x00);
+ sp805_write_wdog_ctrl(base, 0x00);
/* Lock registers so that they can't be accidently overwritten */
- sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+ sp805_write_wdog_lock(base, 0x0);
}
-void sp805_wdog_refresh(void)
+static void sp805_wdog_refresh_(unsigned long base)
{
/* Unlock to access the watchdog registers */
- sp805_write_wdog_lock(SP805_WDOG_BASE, SP805_WDOG_UNLOCK_ACCESS);
+ sp805_write_wdog_lock(base, SP805_WDOG_UNLOCK_ACCESS);
/*
* Write of any value to WdogIntClr clears interrupt and reloads
* the counter from the value in WdogLoad Register.
*/
- sp805_write_wdog_int_clr(SP805_WDOG_BASE, 1);
+ sp805_write_wdog_int_clr(base, 1);
/* Lock registers so that they can't be accidently overwritten */
- sp805_write_wdog_lock(SP805_WDOG_BASE, 0x0);
+ sp805_write_wdog_lock(base, 0x0);
+}
+
+void sp805_wdog_start(uint32_t wdog_cycles)
+{
+ sp805_wdog_start_(SP805_WDOG_BASE, wdog_cycles);
+}
+
+void sp805_wdog_stop(void)
+{
+ sp805_wdog_stop_(SP805_WDOG_BASE);
+}
+
+void sp805_wdog_refresh(void)
+{
+ sp805_wdog_refresh_(SP805_WDOG_BASE);
+}
+
+void sp805_twdog_start(uint32_t wdog_cycles)
+{
+ sp805_wdog_start_(SP805_TWDOG_BASE, wdog_cycles);
+}
+
+void sp805_twdog_stop(void)
+{
+ sp805_wdog_stop_(SP805_TWDOG_BASE);
+}
+
+void sp805_twdog_refresh(void)
+{
+ sp805_wdog_refresh_(SP805_TWDOG_BASE);
}
diff --git a/drivers/cadence/uart/aarch64/cdns_console.S b/drivers/cadence/uart/aarch64/cdns_console.S
new file mode 100644
index 000000000..c88990e24
--- /dev/null
+++ b/drivers/cadence/uart/aarch64/cdns_console.S
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <drivers/console.h>
+#include <drivers/cadence/cdns_uart.h>
+#include <platform_def.h>
+
+ /*
+ * "core" functions are low-level implementations that don't require
+ * writable memory and are thus safe to call in BL1 crash context.
+ */
+ .globl console_core_init
+ .globl console_core_putc
+ .globl console_core_getc
+ .globl console_core_flush
+
+ .globl console_init
+ .globl console_putc
+ .globl console_getc
+ .globl console_flush
+
+ /*
+ * The console base is in the data section and not in .bss
+ * even though it is zero-init. In particular, this allows
+ * the console functions to start using this variable before
+ * the runtime memory is initialized for images which do not
+ * need to copy the .data section from ROM to RAM.
+ */
+ .section .data.console_base
+ .align 3
+console_base: .quad 0x0
+
+
+func console_init
+ adrp x3, console_base
+ str x0, [x3, :lo12:console_base]
+ b console_core_init
+endfunc console_init
+
+ /* -----------------------------------------------
+ * int console_core_init(uintptr_t base_addr)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. This
+ * function will be accessed by console_init and
+ * crash reporting.
+ * We assume that the bootloader already set up
+ * the HW (baud, ...) and only enable the trans-
+ * mitter and receiver here.
+ * In: x0 - console base address
+ * Out: return 1 on success else 0 on error
+ * Clobber list : x1, x2, x3
+ * -----------------------------------------------
+ */
+func console_core_init
+ /* Check the input base address */
+ cbz x0, core_init_fail
+
+ /* RX/TX enabled & reset */
+ mov w3, #(R_UART_CR_TX_EN | R_UART_CR_RX_EN | R_UART_CR_TXRST | R_UART_CR_RXRST)
+ str w3, [x0, #R_UART_CR]
+
+ mov w0, #1
+ ret
+core_init_fail:
+ mov w0, wzr
+ ret
+endfunc console_core_init
+
+
+ /* --------------------------------------------------------
+	 * int console_core_putc(int c, uintptr_t base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or -1 on error.
+ * In : w0 - character to be printed
+ * x1 - console base address
+ * Out : return -1 on error else return character.
+ * Clobber list : x2
+ * --------------------------------------------------------
+ */
+func console_core_putc
+ cbz x1, putc_error
+ /* Prepend '\r' to '\n' */
+ cmp w0, #0xA
+ b.ne 2f
+1:
+ /* Check if the transmit FIFO is empty */
+ ldr w2, [x1, #R_UART_SR]
+ tbz w2, #UART_SR_INTR_TEMPTY_BIT, 1b
+ mov w2, #0xD
+ str w2, [x1, #R_UART_TX]
+2:
+ /* Check if the transmit FIFO is empty */
+ ldr w2, [x1, #R_UART_SR]
+ tbz w2, #UART_SR_INTR_TEMPTY_BIT, 2b
+ str w0, [x1, #R_UART_TX]
+ ret
+putc_error:
+ mov w0, #ERROR_NO_VALID_CONSOLE
+ ret
+endfunc console_core_putc
+
+ /* --------------------------------------------------------
+	 * int console_putc(int c)
+	 * Function to output a character over the console. It
+	 * returns the character printed on success or -1 on error.
+	 * The console base address is loaded from console_base.
+	 * In : w0 - character to be printed
+	 * Out : return -1 on error else return character.
+	 * Clobber list : x1, x2
+ * --------------------------------------------------------
+ */
+func console_putc
+ adrp x1, console_base
+ ldr x1, [x1, :lo12:console_base]
+ b console_core_putc
+endfunc console_putc
+
+ /* ---------------------------------------------
+	 * int console_core_getc(uintptr_t base_addr)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or -1 if no character is available.
+ * In : x0 - console base address
+ * Out: w0 - character if available, else -1
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_core_getc
+
+ /* Check if the receive FIFO is empty */
+ ldr w1, [x0, #R_UART_SR]
+ tbnz w1, #UART_SR_INTR_REMPTY_BIT, no_char
+ ldr w1, [x0, #R_UART_RX]
+ mov w0, w1
+ ret
+no_char:
+ mov w0, #ERROR_NO_PENDING_CHAR
+ ret
+endfunc console_core_getc
+
+ /* ---------------------------------------------
+	 * int console_getc(void)
+	 * Function to get a character from the console.
+	 * It returns the character grabbed on success
+	 * or -1 if no character is available.
+	 * The console base address is loaded from console_base.
+ * Out: w0 - character if available, else -1
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_getc
+ adrp x0, console_base
+ ldr x0, [x0, :lo12:console_base]
+ b console_core_getc
+endfunc console_getc
+
+ /* ---------------------------------------------
+	 * int console_core_flush(uintptr_t base_addr)
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * In : x0 - console base address
+ * Out : void
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_core_flush
+ cbz x0, flush_error
+ /* Loop until the transmit FIFO is empty */
+1:
+	ldr	w1, [x0, #R_UART_SR]
+	tbz	w1, #UART_SR_INTR_TEMPTY_BIT, 1b
+	mov	w0, #0
+ ret
+flush_error:
+ mov w0, #ERROR_NO_VALID_CONSOLE
+ ret
+endfunc console_core_flush
+
+ /* ---------------------------------------------
+	 * void console_flush(void)
+	 * Function to force a write of all buffered
+	 * data that hasn't been output.
+	 * The console base address is loaded from console_base.
+ * Out : void.
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_flush
+ adrp x0, console_base
+ ldr x0, [x0, :lo12:console_base]
+ b console_core_flush
+endfunc console_flush
diff --git a/el3_payload/Makefile b/el3_payload/Makefile
index 188731c7e..3ea39fb84 100644
--- a/el3_payload/Makefile
+++ b/el3_payload/Makefile
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: BSD-3-Clause
#
+include ../make_helpers/build_macros.mk
+
# Default number of threads per CPU on FVP
FVP_MAX_PE_PER_CPU := 1
@@ -31,6 +33,8 @@ PLAT ?= fvp
ASFLAGS := -nostdinc -ffreestanding -Wa,--fatal-warnings -Werror
ASFLAGS += -Iplat/${PLAT}/ -I.
+LDFLAGS += $(call ld_option,--no-warn-rwx-segments)
+
PLAT_BUILD_DIR := build/${PLAT}
SOURCES := entrypoint.S spin.S uart.S plat/${PLAT}/platform.S
OBJS := $(patsubst %,$(PLAT_BUILD_DIR)/%,$(notdir $(SOURCES:.S=.o)))
diff --git a/el3_payload/plat/tc0/platform.S b/el3_payload/plat/tc/platform.S
index 1fac2c4b2..019b44cd2 100644
--- a/el3_payload/plat/tc0/platform.S
+++ b/el3_payload/plat/tc/platform.S
@@ -13,15 +13,15 @@
/*----------------------------------------------------------------------
* unsigned int platform_get_core_pos(unsigned long mpid)
*
- * Function to calculate the core position on TC0 platforms.
+ * Function to calculate the core position on TC platforms.
*
- * (ClusterId * TC0_MAX_CPUS_PER_CLUSTER * TC0_MAX_PE_PER_CPU) +
- * (CPUId * TC0_MAX_PE_PER_CPU) +
+ * (ClusterId * TC_MAX_CPUS_PER_CLUSTER * TC_MAX_PE_PER_CPU) +
+ * (CPUId * TC_MAX_PE_PER_CPU) +
* ThreadId
*
* which can be simplified as:
*
- * ((ClusterId * TC0_MAX_CPUS_PER_CLUSTER + CPUId) * TC0_MAX_PE_PER_CPU)
+ * ((ClusterId * TC_MAX_CPUS_PER_CLUSTER + CPUId) * TC_MAX_PE_PER_CPU)
* + ThreadId
*
* ---------------------------------------------------------------------
@@ -41,9 +41,9 @@ func platform_get_core_pos
ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
/* Compute linear position */
- mov x3, #TC0_MAX_CPUS_PER_CLUSTER
+ mov x3, #TC_MAX_CPUS_PER_CLUSTER
madd x1, x2, x3, x1
- mov x3, #TC0_MAX_PE_PER_CPU
+ mov x3, #TC_MAX_PE_PER_CPU
madd x0, x1, x3, x0
ret
endfunc platform_get_core_pos
diff --git a/el3_payload/plat/tc0/platform.h b/el3_payload/plat/tc/platform.h
index 5e4300257..66dd4479a 100644
--- a/el3_payload/plat/tc0/platform.h
+++ b/el3_payload/plat/tc/platform.h
@@ -13,7 +13,7 @@
#define UART_BASE 0x7ff80000
-#define TC0_MAX_CPUS_PER_CLUSTER 8
-#define TC0_MAX_PE_PER_CPU 1
+#define TC_MAX_CPUS_PER_CLUSTER 8
+#define TC_MAX_PE_PER_CPU 1
#endif /* PLATFORM_H */
diff --git a/el3_payload/plat/tc0/platform.mk b/el3_payload/plat/tc/platform.mk
index 8ff1cda1c..8ff1cda1c 100644
--- a/el3_payload/plat/tc0/platform.mk
+++ b/el3_payload/plat/tc/platform.mk
diff --git a/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
index b9c0d8716..74c56008d 100644
--- a/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
+++ b/fwu/ns_bl1u/aarch32/ns_bl1u_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,17 @@ func ns_bl1u_entrypoint
stcopr r0, HSCTLR
isb
+ /* --------------------------------------------------------------------
+ * Invalidate the cache in the RW memory range to safeguard
+ * against possible stale data in the cache from previous
+ * firmware stage.
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =__RW_START__
+ ldr r1, =__RW_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+
/* ---------------------------------------------------------------------
* Init C runtime environment.
* - Zero-initialise the .bss section;
@@ -48,8 +59,6 @@ func ns_bl1u_entrypoint
* Allocate a stack whose memory will be marked as Normal
* Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
* enabled.
- * There is no risk of reading stale stack memory after enabling the MMU
- * as only the primary CPU is running at the moment.
* ---------------------------------------------------------------------
*/
ldcopr r0, MPIDR
diff --git a/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
index d971e4ad6..a73435bb1 100644
--- a/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
+++ b/fwu/ns_bl1u/aarch64/ns_bl1u_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,17 @@ func ns_bl1u_entrypoint
asm_write_sctlr_el1_or_el2 x1
isb
+ /* --------------------------------------------------------------------
+ * Invalidate the cache in the RW memory range to safeguard
+ * against possible stale data in the cache from previous
+ * firmware stage.
+ * --------------------------------------------------------------------
+ */
+ ldr x0, =__RW_START__
+ ldr x1, =__RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
/* ---------------------------------------------------------------------
* Init C runtime environment.
* - Zero-initialise the .bss section;
@@ -48,8 +59,6 @@ func ns_bl1u_entrypoint
* Allocate a stack whose memory will be marked as Normal
* Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
* enabled.
- * There is no risk of reading stale stack memory after enabling the MMU
- * as only the primary CPU is running at the moment.
* ---------------------------------------------------------------------
*/
mrs x0, mpidr_el1
diff --git a/fwu/ns_bl1u/ns_bl1u.ld.S b/fwu/ns_bl1u/ns_bl1u.ld.S
index 52d80deab..5021dafe9 100644
--- a/fwu/ns_bl1u/ns_bl1u.ld.S
+++ b/fwu/ns_bl1u/ns_bl1u.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -43,6 +43,7 @@ SECTIONS
* Its VMA must be page-aligned as it marks the first read/write page.
*/
. = NS_BL1U_RW_BASE;
+ __RW_START__ = .;
ASSERT(. == ALIGN(PAGE_SIZE),
"NS_BL1U_RW_BASE address is not aligned on a page boundary.")
.data . : ALIGN(16) {
@@ -79,6 +80,7 @@ SECTIONS
__NS_BL1U_RAM_START__ = ADDR(.data);
__NS_BL1U_RAM_END__ = .;
+ __RW_END__ = .;
__DATA_ROM_START__ = LOADADDR(.data);
__DATA_SIZE__ = SIZEOF(.data);
diff --git a/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
index 28a457251..76116428b 100644
--- a/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
+++ b/fwu/ns_bl2u/aarch32/ns_bl2u_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,17 @@ func ns_bl2u_entrypoint
stcopr r0, HSCTLR
isb
+ /* --------------------------------------------------------------------
+ * Invalidate the cache in the RW memory range to safeguard
+ * against possible stale data in the cache from previous
+ * firmware stage.
+ * --------------------------------------------------------------------
+ */
+ ldr r0, =__RW_START__
+ ldr r1, =__RW_END__
+ sub r1, r1, r0
+ bl inv_dcache_range
+
/* ---------------------------------------------------------------------
* Zero-initialise the .bss section.
* ---------------------------------------------------------------------
@@ -40,8 +51,6 @@ func ns_bl2u_entrypoint
* Allocate a stack whose memory will be marked as Normal
* Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
* enabled.
- * There is no risk of reading stale stack memory after enabling the MMU
- * as only the primary CPU is running at the moment.
* ---------------------------------------------------------------------
*/
ldcopr r0, MPIDR
diff --git a/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
index 28d6dface..1bc057c63 100644
--- a/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
+++ b/fwu/ns_bl2u/aarch64/ns_bl2u_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -28,6 +28,17 @@ func ns_bl2u_entrypoint
asm_write_sctlr_el1_or_el2 x1
isb
+ /* --------------------------------------------------------------------
+ * Invalidate the cache in the RW memory range to safeguard
+ * against possible stale data in the cache from previous
+ * firmware stage.
+ * --------------------------------------------------------------------
+ */
+ ldr x0, =__RW_START__
+ ldr x1, =__RW_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
/* ---------------------------------------------------------------------
* Zero-initialise the .bss section.
* ---------------------------------------------------------------------
@@ -40,8 +51,6 @@ func ns_bl2u_entrypoint
* Allocate a stack whose memory will be marked as Normal
* Inner-Shareable, Write-Back, Write-Allocate memory when the MMU is
* enabled.
- * There is no risk of reading stale stack memory after enabling the MMU
- * as only the primary CPU is running at the moment.
* ---------------------------------------------------------------------
*/
mrs x0, mpidr_el1
diff --git a/fwu/ns_bl2u/ns_bl2u.ld.S b/fwu/ns_bl2u/ns_bl2u.ld.S
index a6c6d2e88..d8949f3cd 100644
--- a/fwu/ns_bl2u/ns_bl2u.ld.S
+++ b/fwu/ns_bl2u/ns_bl2u.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -42,6 +42,8 @@ SECTIONS
__RODATA_END__ = .;
} >RAM
+ __RW_START__ = .;
+
.data . : {
__DATA_START__ = .;
*(.data*)
@@ -75,6 +77,7 @@ SECTIONS
} >RAM
__NS_BL2U_END__ = .;
+ __RW_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
diff --git a/fwu/ns_bl2u/ns_bl2u.mk b/fwu/ns_bl2u/ns_bl2u.mk
index 0864313ac..7225d01bd 100644
--- a/fwu/ns_bl2u/ns_bl2u.mk
+++ b/fwu/ns_bl2u/ns_bl2u.mk
@@ -16,6 +16,7 @@ NS_BL2U_INCLUDES := \
-Iinclude/common/${ARCH} \
-Iinclude/lib \
-Iinclude/lib/${ARCH} \
+ -Iinclude/lib/extensions \
-Iinclude/lib/utils \
-Iinclude/lib/xlat_tables \
-Iinclude/plat/common \
diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S
index d829133f7..8a69c38d5 100644
--- a/include/common/aarch64/asm_macros.S
+++ b/include/common/aarch64/asm_macros.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -79,6 +79,15 @@
.endm
/*
+ * Create a vector entry that just spins making the exception unrecoverable.
+ */
+ .macro vector_entry_spin name
+ vector_entry \name
+ b \name
+ end_vector_entry \name
+ .endm
+
+ /*
* This macro calculates the base address of an MP stack using the
* platform_get_core_pos() index, the name of the stack storage and
* the size of each stack
diff --git a/include/common/debug.h b/include/common/debug.h
index 4b30175d9..6025590b4 100644
--- a/include/common/debug.h
+++ b/include/common/debug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2014-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,6 +24,11 @@ __attribute__((format(printf, 1, 2)))
void mp_printf(const char *fmt, ...);
#endif /* IMAGE_CACTUS_MM */
+#ifdef IMAGE_REALM
+void realm_printf(const char *fmt, ...);
+#define mp_printf realm_printf
+#endif
+
/*
* The log output macros print output to the console. These macros produce
* compiled log output only if the LOG_LEVEL defined in the makefile (or the
diff --git a/include/common/firmware_image_package.h b/include/common/firmware_image_package.h
index aba5b57f0..2a144ba8e 100644
--- a/include/common/firmware_image_package.h
+++ b/include/common/firmware_image_package.h
@@ -22,6 +22,8 @@
{{0x4f, 0x51, 0x1d, 0x11}, {0x2b, 0xe5}, {0x4e, 0x49}, 0xb4, 0xc5, {0x83, 0xc2, 0xf7, 0x15, 0x84, 0x0a} }
#define UUID_FIRMWARE_UPDATE_FWU_CERT \
{{0x71, 0x40, 0x8a, 0xb2}, {0x18, 0xd6}, {0x87, 0x4c}, 0x8b, 0x2e, {0xc6, 0xdc, 0xcd, 0x50, 0xf0, 0x96} }
+#define UUID_TRUSTED_KEY_CERT \
+ {{0x82, 0x7e, 0xe8, 0x90}, {0xf8, 0x60}, {0xe4, 0x11}, 0xa1, 0xb4, {0x77, 0x7a, 0x21, 0xb4, 0xf9, 0x4c} }
typedef struct fip_toc_header {
uint32_t name;
diff --git a/include/common/test_helpers.h b/include/common/test_helpers.h
index 3ee2b5318..8cddc72d3 100644
--- a/include/common/test_helpers.h
+++ b/include/common/test_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,10 +8,9 @@
#define TEST_HELPERS_H__
#include <arch_features.h>
-#include <ffa_svc.h>
#include <plat_topology.h>
#include <psci.h>
-#include <spm_common.h>
+#include <sme.h>
#include <tftf_lib.h>
#include <trusted_os.h>
#include <tsp.h>
@@ -89,6 +88,15 @@ typedef test_result_t (*test_function_arg_t)(void *arg);
} \
} while (0)
+#define SKIP_TEST_IF_DIT_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_4_dit_present()) { \
+ tftf_testcase_printf( \
+ "DIT not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
#define SKIP_TEST_IF_PAUTH_NOT_SUPPORTED() \
do { \
if (!is_armv8_3_pauth_present()) { \
@@ -107,6 +115,14 @@ typedef test_result_t (*test_function_arg_t)(void *arg);
} \
} while (0)
+#define SKIP_TEST_IF_SVE_NOT_SUPPORTED() \
+ do { \
+ if (!is_armv8_2_sve_present()) { \
+ tftf_testcase_printf("SVE not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
#define SKIP_TEST_IF_ECV_NOT_SELF_SYNC() \
do { \
if (get_armv8_6_ecv_support() != \
@@ -162,33 +178,6 @@ typedef test_result_t (*test_function_arg_t)(void *arg);
version & MM_VERSION_MINOR_MASK); \
} while (0)
-#define SKIP_TEST_IF_FFA_VERSION_LESS_THAN(major, minor) \
- do { \
- smc_ret_values smc_ret = ffa_version( \
- MAKE_FFA_VERSION(major, minor)); \
- uint32_t version = smc_ret.ret0; \
- \
- if (version == FFA_ERROR_NOT_SUPPORTED) { \
- tftf_testcase_printf("FFA_VERSION not supported.\n"); \
- return TEST_RESULT_SKIPPED; \
- } \
- \
- if ((version & FFA_VERSION_BIT31_MASK) != 0U) { \
- tftf_testcase_printf("FFA_VERSION bad response: %x\n", \
- version); \
- return TEST_RESULT_FAIL; \
- } \
- \
- if (version < MAKE_FFA_VERSION(major, minor)) { \
- tftf_testcase_printf("FFA_VERSION returned %u.%u\n" \
- "The required version is %u.%u\n", \
- version >> FFA_VERSION_MAJOR_SHIFT, \
- version & FFA_VERSION_MINOR_MASK, \
- major, minor); \
- return TEST_RESULT_SKIPPED; \
- } \
- } while (0)
-
#define SKIP_TEST_IF_ARCH_DEBUG_VERSION_LESS_THAN(version) \
do { \
uint32_t debug_ver = arch_get_debug_version(); \
@@ -202,41 +191,141 @@ typedef test_result_t (*test_function_arg_t)(void *arg);
} \
} while (0)
-#define SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(mb, ffa_uuid) \
+#define SKIP_TEST_IF_TRBE_NOT_SUPPORTED() \
do { \
- smc_ret_values smc_ret = ffa_partition_info_get(ffa_uuid); \
- ffa_rx_release(); \
- if (smc_ret.ret0 == FFA_ERROR && \
- smc_ret.ret2 == FFA_ERROR_INVALID_PARAMETER) { \
- tftf_testcase_printf("FFA endpoint not deployed!\n"); \
+ if (!get_armv9_0_trbe_support()) { \
+ tftf_testcase_printf("ARMv9-TRBE not supported\n"); \
return TEST_RESULT_SKIPPED; \
- } else if (smc_ret.ret0 != FFA_SUCCESS_SMC32) { \
- ERROR("ffa_partition_info_get failed!\n"); \
- return TEST_RESULT_FAIL; \
} \
- } while (0)
+ } while (false)
+
+#define SKIP_TEST_IF_TRF_NOT_SUPPORTED() \
+ do { \
+ if (!get_armv8_4_trf_support()) { \
+ tftf_testcase_printf("ARMv8.4-TRF not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
-#define GET_TFTF_MAILBOX(mb) \
+#define SKIP_TEST_IF_SYS_REG_TRACE_NOT_SUPPORTED() \
do { \
- if (!get_tftf_mailbox(&mb)) { \
- ERROR("Mailbox not configured!\nThis test relies on" \
- " test suite \"FF-A RXTX Mapping\" to map/configure" \
- " RXTX buffers\n"); \
- return TEST_RESULT_FAIL; \
+ if (!get_armv8_0_sys_reg_trace_support()) { \
+ tftf_testcase_printf("ARMv8-system register" \
+					     " trace not supported\n");	\
+ return TEST_RESULT_SKIPPED; \
} \
- } while (false);
+ } while (false)
-#define CHECK_SPMC_TESTING_SETUP(ffa_major, ffa_minor, expected_uuids) \
+#define SKIP_TEST_IF_AFP_NOT_SUPPORTED() \
do { \
- const size_t expected_uuids_size = \
- sizeof(expected_uuids) / sizeof(struct ffa_uuid); \
- test_result_t ret = check_spmc_testing_set_up( \
- ffa_major, ffa_minor, expected_uuids, \
- expected_uuids_size); \
- if (ret != TEST_RESULT_SUCCESS) { \
- return ret; \
+ if (!get_feat_afp_present()) { \
+ tftf_testcase_printf("ARMv8.7-afp not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_MPAM_NOT_SUPPORTED() \
+ do { \
+		if (!is_feat_mpam_supported()) {			\
+ tftf_testcase_printf("ARMv8.4-mpam not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#ifdef __aarch64__
+#define SKIP_TEST_IF_PA_SIZE_LESS_THAN(n) \
+ do { \
+ static const unsigned int pa_range_bits_arr[] = { \
+ PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011,\
+ PARANGE_0100, PARANGE_0101, PARANGE_0110 \
+ }; \
+ if (pa_range_bits_arr[get_pa_range()] < n) { \
+ tftf_testcase_printf("PA size less than %d bit\n", n); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+#else
+#define SKIP_TEST_IF_PA_SIZE_LESS_THAN(n) \
+ do { \
+ return TEST_RESULT_SKIPPED; \
+ } while (false)
+#endif
+
+#define SKIP_TEST_IF_BRBE_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_brbe_support()) { \
+ tftf_testcase_printf("FEAT_BRBE not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_WFXT_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_wfxt_present()) { \
+ tftf_testcase_printf("ARMv8.7-WFxT not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_RNG_TRAP_NOT_SUPPORTED() \
+ do { \
+ if (!is_feat_rng_trap_present()) { \
+ tftf_testcase_printf("ARMv8.5-RNG_TRAP not" \
+					" supported\n");		\
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_PMUV3_NOT_SUPPORTED() \
+ do { \
+ if (!get_feat_pmuv3_supported()) { \
+ tftf_testcase_printf("FEAT_PMUv3 not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_SME_NOT_SUPPORTED() \
+ do { \
+		if (!is_feat_sme_supported()) {				\
+ tftf_testcase_printf("FEAT_SME not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_SME2_NOT_SUPPORTED() \
+ do { \
+		if (!is_feat_sme2_supported()) {			\
+ tftf_testcase_printf("FEAT_SME2 not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
+
+#define SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP() \
+ do { \
+ u_register_t retrmm = 0U; \
+ \
+ if (!get_armv9_2_feat_rme_support()) { \
+ tftf_testcase_printf("FEAT_RME not supported\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ host_rmi_init_cmp_result(); \
+ retrmm = host_rmi_version(RMI_ABI_VERSION_VAL); \
+ \
+ VERBOSE("RMM version is: %lu.%lu\n", \
+ RMI_ABI_VERSION_GET_MAJOR(retrmm), \
+ RMI_ABI_VERSION_GET_MINOR(retrmm)); \
+ \
+ /* \
+ * TODO: Remove this once SMC_RMM_REALM_CREATE is implemented \
+ * in TRP. For the moment skip the test if RMM is TRP, TRP \
+ * version is always 0. \
+ */ \
+ if (retrmm == 0U) { \
+ tftf_testcase_printf("RMM is TRP\n"); \
+ return TEST_RESULT_SKIPPED; \
} \
- } while (false);
+ } while (false)
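+
+/*
+ * Illustrative usage (not part of this header): a TFTF test typically invokes
+ * one of the SKIP_TEST_IF_* macros above before exercising the corresponding
+ * feature, for example:
+ *
+ *     test_result_t test_sme_support(void)
+ *     {
+ *             SKIP_TEST_IF_SME_NOT_SUPPORTED();
+ *             ... exercise the feature under test ...
+ *             return TEST_RESULT_SUCCESS;
+ *     }
+ *
+ * When the feature is absent, the macro prints a message and makes the calling
+ * test function return TEST_RESULT_SKIPPED.
+ */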
/* Helper macro to verify if system suspend API is supported */
#define is_psci_sys_susp_supported() \
@@ -289,22 +378,18 @@ test_result_t map_test_unmap(const map_args_unmap_t *args,
test_function_arg_t test);
/*
- * Helper function to set TFTF global mailbox for SPM related tests.
- * This function should be invoked by the first TFTF test that requires
- * RX and/or TX buffers.
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
*/
-void set_tftf_mailbox(const struct mailbox_buffers *mb);
+void wait_for_non_lead_cpus(void);
/*
- * Helper function to get TFTF global mailbox for SPM related tests.
- * This function should be called by all tests that require access to RX or TX
- * buffers, after the function 'set_tftf_mailbox' has been used by the first
- * test to rely on RX and TX buffers.
+ * Utility function to wait for a given CPU other than the caller to be
+ * OFF.
*/
-bool get_tftf_mailbox(struct mailbox_buffers *mb);
+void wait_for_core_to_turn_off(unsigned int mpidr);
-test_result_t check_spmc_testing_set_up(uint32_t ffa_version_major,
- uint32_t ffa_version_minor, const struct ffa_uuid *ffa_uuids,
- size_t ffa_uuids_size);
+/* Generate 64-bit random number */
+unsigned long long rand64(void);
#endif /* __TEST_HELPERS_H__ */
diff --git a/include/drivers/arm/arm_gic.h b/include/drivers/arm/arm_gic.h
index 0f27dc1ca..528ec6ec6 100644
--- a/include/drivers/arm/arm_gic.h
+++ b/include/drivers/arm/arm_gic.h
@@ -7,6 +7,7 @@
#ifndef __ARM_GIC_H__
#define __ARM_GIC_H__
+#include <stdbool.h>
#include <stdint.h>
/***************************************************************************
@@ -150,4 +151,9 @@ void arm_gic_save_context_global(void);
*****************************************************************************/
void arm_gic_restore_context_global(void);
+/******************************************************************************
+ * Check if extended SPI range is implemented by GIC.
+ *****************************************************************************/
+bool arm_gic_is_espi_supported(void);
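+
+/*
+ * Illustrative usage (not part of this header): tests that exercise extended
+ * SPIs (INTIDs 4096 and above) would typically call this helper first and
+ * skip when it returns false.
+ */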
+
#endif /* __ARM_GIC_H__ */
diff --git a/include/drivers/arm/gic_v3.h b/include/drivers/arm/gic_v3.h
index 0346a235f..2f4b52d68 100644
--- a/include/drivers/arm/gic_v3.h
+++ b/include/drivers/arm/gic_v3.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -34,6 +34,13 @@
#define IROUTER_IRM_SHIFT 31
#define IROUTER_IRM_MASK 0x1
+/* GICD_TYPER shifts and masks */
+#define TYPER_ESPI U(1 << 8)
+#define TYPER_DVIS U(1 << 18)
+#define TYPER_ESPI_RANGE_MASK U(0x1f)
+#define TYPER_ESPI_RANGE_SHIFT U(27)
+#define TYPER_ESPI_RANGE		(TYPER_ESPI_RANGE_MASK << TYPER_ESPI_RANGE_SHIFT)
+
/*******************************************************************************
* GICv3 Re-distributor interface registers & constants
******************************************************************************/
@@ -83,6 +90,25 @@
#define IGRPEN1_EL1_ENABLE_SHIFT 0
#define IGRPEN1_EL1_ENABLE_BIT (1 << IGRPEN1_EL1_ENABLE_SHIFT)
+/* ICH_ICH_LR<n>_EL2 definitions */
+#define ICH_LRn_EL2_STATE_Invalid (0UL << 62)
+#define ICH_LRn_EL2_STATE_Pending (1UL << 62)
+#define ICH_LRn_EL2_STATE_Active (2UL << 62)
+#define ICH_LRn_EL2_STATE_Pending_Active (3UL << 62)
+#define ICH_LRn_EL2_Group_0 (0UL << 60)
+#define ICH_LRn_EL2_Group_1 (1UL << 60)
+#define ICH_LRn_EL2_Priority_SHIFT 48
+#define ICH_LRn_EL2_Priority_MASK 0xFF
+#define ICH_LRn_EL2_vINTID_SHIFT 0
+#define ICH_LRn_EL2_vINTID_MASK 0xFFFF
+
+/* ICV_CTLR_EL1 definitions */
+#define ICV_CTLR_EL1_PRIbits_SHIFT 8
+#define ICV_CTLR_EL1_PRIbits_MASK 7
+
+/* ICV_IGRPEN1_EL1 definition */
+#define ICV_IGRPEN1_EL1_Enable 1UL
+
/* The highest affinity 0 that can be a SGI target*/
#define SGI_TARGET_MAX_AFF0 16
@@ -210,6 +236,10 @@ void gicv3_setup_cpuif(void);
*/
void gicv3_enable_cpuif(void);
+/*
+ * Return the value of GICD_TYPER.
+ */
+unsigned int gicv3_get_gicd_typer(void);
#endif /*__ASSEMBLY__*/
#endif /* __GIC_V3_H__ */
diff --git a/include/drivers/arm/sp805.h b/include/drivers/arm/sp805.h
index c033ccfda..75bcc123a 100644
--- a/include/drivers/arm/sp805.h
+++ b/include/drivers/arm/sp805.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -47,9 +47,17 @@
#define SP805_WDOG_PCELL_ID_SHIFT 0
#define SP805_WDOG_PCELL_ID_MASK 0xff
+#define ARM_SP805_TWDG_CLK_HZ 32768
+
+/* Public APIs for non-trusted watchdog module. */
void sp805_wdog_start(unsigned int wdog_cycles);
void sp805_wdog_stop(void);
void sp805_wdog_refresh(void);
+/* Public APIs for trusted watchdog module. */
+void sp805_twdog_start(unsigned int wdog_cycles);
+void sp805_twdog_stop(void);
+void sp805_twdog_refresh(void);
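+
+/*
+ * Illustrative usage (not part of this header): with the trusted watchdog
+ * clocked at ARM_SP805_TWDG_CLK_HZ (32768 Hz), arming it for roughly five
+ * seconds could be done with:
+ *
+ *     sp805_twdog_start(5 * ARM_SP805_TWDG_CLK_HZ);
+ */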
+
#endif /* __SP805_H__ */
diff --git a/include/drivers/cadence/cdns_uart.h b/include/drivers/cadence/cdns_uart.h
new file mode 100644
index 000000000..87e98eaa7
--- /dev/null
+++ b/include/drivers/cadence/cdns_uart.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CDNS_UART_H
+#define CDNS_UART_H
+
+#include <drivers/console.h>
+#include <lib/utils_def.h>
+/* This is very minimalistic and will only work in QEMU. */
+
+/* CADENCE Registers */
+#define R_UART_CR 0
+#define R_UART_CR_RXRST (1 << 0) /* RX logic reset */
+#define R_UART_CR_TXRST (1 << 1) /* TX logic reset */
+#define R_UART_CR_RX_EN (1 << 2) /* RX enabled */
+#define R_UART_CR_TX_EN (1 << 4) /* TX enabled */
+
+#define R_UART_SR 0x2C
+#define UART_SR_INTR_REMPTY_BIT 1
+#define UART_SR_INTR_TFUL_BIT 4
+#define UART_SR_INTR_TEMPTY_BIT 3
+
+#define R_UART_TX 0x30
+#define R_UART_RX 0x30
+
+#define CONSOLE_T_BASE (U(5) * REGSZ)
+
+#endif /* CDNS_UART_H */
diff --git a/include/lib/aarch32/arch.h b/include/lib/aarch32/arch.h
index 640457b51..6f41db968 100644
--- a/include/lib/aarch32/arch.h
+++ b/include/lib/aarch32/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,6 +20,8 @@
#define MIDR_REV_BITS U(4)
#define MIDR_PN_MASK U(0xfff)
#define MIDR_PN_SHIFT U(4)
+#define MIDR_VAR_MASK U(0xf0)
+#define MIDR_REV_MASK U(0xf)
/*******************************************************************************
* MPIDR macros
@@ -99,6 +101,16 @@
#define ID_MMFR4_CNP_LENGTH U(4)
#define ID_MMFR4_CNP_MASK U(0xf)
+/* ID_DFR0_EL1 definitions */
+#define ID_DFR0_TRACEFILT_SHIFT U(28)
+#define ID_DFR0_TRACEFILT_MASK U(0xf)
+#define ID_DFR0_TRACEFILT_SUPPORTED U(1)
+
+/* ID_DFR0_EL1 definitions */
+#define ID_DFR0_COPTRC_SHIFT U(12)
+#define ID_DFR0_COPTRC_MASK U(0xf)
+#define ID_DFR0_COPTRC_SUPPORTED U(1)
+
/* ID_PFR0 definitions */
#define ID_PFR0_AMU_SHIFT U(20)
#define ID_PFR0_AMU_LENGTH U(4)
@@ -430,10 +442,11 @@
/* PMCCFILTR definitions */
#define PMCCFILTR_EL0_P_BIT (U(1) << 31)
+#define PMCCFILTR_EL0_U_BIT (U(1) << 30)
#define PMCCFILTR_EL0_NSK_BIT (U(1) << 29)
+#define PMCCFILTR_EL0_NSU_BIT (U(1) << 28)
#define PMCCFILTR_EL0_NSH_BIT (U(1) << 27)
#define PMCCFILTR_EL0_M_BIT (U(1) << 26)
-#define PMCCFILTR_EL0_MT_BIT (U(1) << 25)
#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
/* PMU event counter ID definitions */
@@ -495,6 +508,50 @@
#define CNTP_CTL_IMASK_MASK U(1)
#define CNTP_CTL_ISTATUS_MASK U(1)
+/* Exception Syndrome Register (ESR) fields */
+#define ESR_EC_SHIFT U(26)
+#define ESR_EC_MASK U(0x3f)
+#define ESR_EC_LENGTH U(6)
+#define ESR_ISS_SHIFT U(0x0)
+#define ESR_ISS_MASK U(0x1ffffff)
+#define EC_UNKNOWN U(0x0)
+#define EC_WFE_WFI U(0x1)
+#define EC_CP15_MRC_MCR U(0x3)
+#define EC_CP15_MRRC_MCRR U(0x4)
+#define EC_CP14_MRC_MCR U(0x5)
+#define EC_CP14_LDC_STC U(0x6)
+#define EC_FP_SIMD U(0x7)
+#define EC_CP10_MRC U(0x8)
+#define EC_CP14_MRRC_MCRR U(0xc)
+#define EC_ILLEGAL U(0xe)
+#define EC_SVC U(0x11)
+#define EC_HVC U(0x12)
+#define EC_SMC U(0x13)
+#define EC_IABORT_LOWER_EL U(0x20)
+#define EC_IABORT_CUR_EL U(0x21)
+#define EC_PC_ALIGN U(0x22)
+#define EC_DABORT_LOWER_EL U(0x24)
+#define EC_DABORT_CUR_EL U(0x25)
+#define EC_SP_ALIGN U(0x26)
+#define EC_FP U(0x28)
+#define EC_SERROR U(0x2f)
+/* Data Fault Status code, not all error codes listed */
+#define ISS_DFSC_MASK U(0x3f)
+#define DFSC_EXT_DABORT U(0x10)
+#define DFSC_GPF_DABORT U(0x28)
+/* ISS encoding an exception from HVC or SVC instruction execution */
+#define ISS_HVC_SMC_IMM16_MASK U(0xffff)
+
+/*
+ * External Abort bit in Instruction and Data Aborts synchronous exception
+ * syndromes.
+ */
+#define ESR_ISS_EABORT_EA_BIT U(9)
+
+#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+#define ISS_BITS(x) (((x) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
+
/* MAIR macros */
#define MAIR0_ATTR_SET(attr, index) ((attr) << ((index) << U(3)))
#define MAIR1_ATTR_SET(attr, index) ((attr) << (((index) - U(3)) << U(3)))
@@ -522,6 +579,7 @@
#define ID_MMFR4 p15, 0, c0, c2, 6
#define ID_PFR0 p15, 0, c0, c1, 0
#define ID_PFR1 p15, 0, c0, c1, 1
+#define ID_DFR0 p15, 0, c0, c1, 2
#define MAIR0 p15, 0, c10, c2, 0
#define MAIR1 p15, 0, c10, c2, 1
#define TTBCR p15, 0, c2, c0, 2
@@ -725,4 +783,25 @@
#define AMEVTYPER1E p15, 0, c13, c15, 6
#define AMEVTYPER1F p15, 0, c13, c15, 7
+/*******************************************************************************
+ * Armv8.4 - Trace Filter System Registers
+ ******************************************************************************/
+#define TRFCR p15, 0, c1, c2, 1
+#define HTRFCR p15, 4, c1, c2, 1
+
+/*******************************************************************************
+ * Trace System Registers
+ ******************************************************************************/
+#define TRCAUXCTLR p14, 1, c0, c6, 0
+#define TRCRSR p14, 1, c0, c10, 0
+#define TRCCCCTLR p14, 1, c0, c14, 0
+#define TRCBBCTLR p14, 1, c0, c15, 0
+#define TRCEXTINSELR0 p14, 1, c0, c8, 4
+#define TRCEXTINSELR1 p14, 1, c0, c9, 4
+#define TRCEXTINSELR2 p14, 1, c0, c10, 4
+#define TRCEXTINSELR3 p14, 1, c0, c11, 4
+#define TRCCLAIMSET p14, 1, c7, c8, 6
+#define TRCCLAIMCLR p14, 1, c7, c9, 6
+#define TRCDEVARCH p14, 1, c7, c15, 6
+
#endif /* ARCH_H */
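A small sketch of the intended use of the new EC_BITS()/ISS_BITS() helpers added above; the syndrome value is taken as a parameter so that no particular system-register accessor has to be assumed.

static bool is_smc_with_zero_imm(unsigned int syndrome)
{
	/* True when the syndrome encodes a trapped SMC with immediate #0. */
	return (EC_BITS(syndrome) == EC_SMC) &&
	       ((ISS_BITS(syndrome) & ISS_HVC_SMC_IMM16_MASK) == 0U);
}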
diff --git a/include/lib/aarch32/arch_features.h b/include/lib/aarch32/arch_features.h
index e2c2f2ccf..3c6a338d2 100644
--- a/include/lib/aarch32/arch_features.h
+++ b/include/lib/aarch32/arch_features.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -35,4 +35,28 @@ static inline uint32_t arch_get_debug_version(void)
DBGDIDR_VERSION_SHIFT);
}
+static inline bool get_armv8_4_trf_support(void)
+{
+ return ((read_id_dfr0() >> ID_DFR0_TRACEFILT_SHIFT) &
+ ID_DFR0_TRACEFILT_MASK) ==
+ ID_DFR0_TRACEFILT_SUPPORTED;
+}
+
+static inline bool is_armv8_4_dit_present(void)
+{
+ return ((read_id_pfr0() >> ID_PFR0_DIT_SHIFT) &
+ ID_PFR0_DIT_MASK) != 0;
+}
+
+static inline bool get_armv8_0_sys_reg_trace_support(void)
+{
+ return ((read_id_dfr0() >> ID_DFR0_COPTRC_SHIFT) &
+ ID_DFR0_COPTRC_MASK) ==
+ ID_DFR0_COPTRC_SUPPORTED;
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+ return 0;
+}
#endif /* ARCH_FEATURES_H */
diff --git a/include/lib/aarch32/arch_helpers.h b/include/lib/aarch32/arch_helpers.h
index f2e3e009f..aca3952fc 100644
--- a/include/lib/aarch32/arch_helpers.h
+++ b/include/lib/aarch32/arch_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2022, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -209,6 +209,7 @@ DEFINE_SYSREG_RW_FUNCS(elr_hyp)
DEFINE_COPROCR_READ_FUNC(mpidr, MPIDR)
DEFINE_COPROCR_READ_FUNC(midr, MIDR)
DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
+DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
DEFINE_COPROCR_READ_FUNC(isr, ISR)
@@ -294,6 +295,23 @@ DEFINE_COPROCR_RW_FUNCS_64(par, PAR_64)
DEFINE_COPROCR_RW_FUNCS(nsacr, NSACR)
+/* AArch32 coproc registers for trace filter */
+DEFINE_COPROCR_RW_FUNCS(htrfcr, HTRFCR)
+DEFINE_COPROCR_RW_FUNCS(trfcr, TRFCR)
+
+/* AArch32 Trace System Registers */
+DEFINE_COPROCR_RW_FUNCS(trcauxctlr, TRCAUXCTLR)
+DEFINE_COPROCR_RW_FUNCS(trcrsr, TRCRSR)
+DEFINE_COPROCR_RW_FUNCS(trcbbctlr, TRCBBCTLR)
+DEFINE_COPROCR_RW_FUNCS(trcccctlr, TRCCCCTLR)
+DEFINE_COPROCR_RW_FUNCS(trcextinselr0, TRCEXTINSELR0)
+DEFINE_COPROCR_RW_FUNCS(trcextinselr1, TRCEXTINSELR1)
+DEFINE_COPROCR_RW_FUNCS(trcextinselr2, TRCEXTINSELR2)
+DEFINE_COPROCR_RW_FUNCS(trcextinselr3, TRCEXTINSELR3)
+DEFINE_COPROCR_RW_FUNCS(trcclaimset, TRCCLAIMSET)
+DEFINE_COPROCR_RW_FUNCS(trcclaimclr, TRCCLAIMCLR)
+DEFINE_COPROCR_READ_FUNC(trcdevarch, TRCDEVARCH)
+
/* AArch32 coproc registers for 32bit MMU descriptor support */
DEFINE_COPROCR_RW_FUNCS(prrr, PRRR)
DEFINE_COPROCR_RW_FUNCS(nmrr, NMRR)
@@ -404,6 +422,9 @@ static inline unsigned int get_current_el(void)
#define read_daif() read_cpsr()
#define write_daif(flags) write_cpsr(flags)
+#define read_dit() read_cpsr()
+#define write_dit(flags) write_cpsr(flags)
+
#define read_cnthp_cval_el2() read64_cnthp_cval_el2()
#define write_cnthp_cval_el2(v) write64_cnthp_cval_el2(v)
diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h
index f2681676a..6a0d28611 100644
--- a/include/lib/aarch64/arch.h
+++ b/include/lib/aarch64/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,7 +16,7 @@
#define MIDR_IMPL_SHIFT U(0x18)
#define MIDR_VAR_SHIFT U(20)
#define MIDR_VAR_BITS U(4)
-#define MIDR_VAR_MASK U(0xf)
+#define MIDR_VAR_MASK U(0xf0)
#define MIDR_REV_SHIFT U(0)
#define MIDR_REV_BITS U(4)
#define MIDR_REV_MASK U(0xf)
@@ -85,15 +85,21 @@
#define ICC_CTLR_EL3 S3_6_C12_C12_4
#define ICC_PMR_EL1 S3_0_C4_C6_0
#define ICC_RPR_EL1 S3_0_C12_C11_3
-#define ICC_IGRPEN1_EL3 S3_6_c12_c12_7
-#define ICC_IGRPEN0_EL1 S3_0_c12_c12_6
-#define ICC_HPPIR0_EL1 S3_0_c12_c8_2
-#define ICC_HPPIR1_EL1 S3_0_c12_c12_2
-#define ICC_IAR0_EL1 S3_0_c12_c8_0
-#define ICC_IAR1_EL1 S3_0_c12_c12_0
-#define ICC_EOIR0_EL1 S3_0_c12_c8_1
-#define ICC_EOIR1_EL1 S3_0_c12_c12_1
-#define ICC_SGI0R_EL1 S3_0_c12_c11_7
+#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7
+#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6
+#define ICC_HPPIR0_EL1 S3_0_C12_C8_2
+#define ICC_HPPIR1_EL1 S3_0_C12_C12_2
+#define ICC_IAR0_EL1 S3_0_C12_C8_0
+#define ICC_IAR1_EL1 S3_0_C12_C12_0
+#define ICC_EOIR0_EL1 S3_0_C12_C8_1
+#define ICC_EOIR1_EL1 S3_0_C12_C12_1
+#define ICC_SGI0R_EL1 S3_0_C12_C11_7
+
+#define ICV_CTRL_EL1 S3_0_C12_C12_4
+#define ICV_IAR1_EL1 S3_0_C12_C12_0
+#define ICV_IGRPEN1_EL1 S3_0_C12_C12_7
+#define ICV_EOIR1_EL1 S3_0_C12_C12_1
+#define ICV_PMR_EL1 S3_0_C4_C6_0
/*******************************************************************************
* Generic timer memory mapped registers & offsets
@@ -122,34 +128,61 @@
#define DCCSW U(0x2)
/* ID_AA64PFR0_EL1 definitions */
-#define ID_AA64PFR0_EL0_SHIFT U(0)
-#define ID_AA64PFR0_EL1_SHIFT U(4)
-#define ID_AA64PFR0_EL2_SHIFT U(8)
-#define ID_AA64PFR0_EL3_SHIFT U(12)
-#define ID_AA64PFR0_AMU_SHIFT U(44)
-#define ID_AA64PFR0_AMU_LENGTH U(4)
-#define ID_AA64PFR0_AMU_MASK ULL(0xf)
-#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
-#define ID_AA64PFR0_AMU_V1 U(0x1)
-#define ID_AA64PFR0_AMU_V1P1 U(0x2)
-#define ID_AA64PFR0_ELX_MASK ULL(0xf)
-#define ID_AA64PFR0_SVE_SHIFT U(32)
-#define ID_AA64PFR0_SVE_MASK ULL(0xf)
-#define ID_AA64PFR0_SVE_LENGTH U(4)
-#define ID_AA64PFR0_MPAM_SHIFT U(40)
-#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
-#define ID_AA64PFR0_DIT_SHIFT U(48)
-#define ID_AA64PFR0_DIT_MASK ULL(0xf)
-#define ID_AA64PFR0_DIT_LENGTH U(4)
-#define ID_AA64PFR0_DIT_SUPPORTED U(1)
-#define ID_AA64PFR0_CSV2_SHIFT U(56)
-#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
-#define ID_AA64PFR0_CSV2_LENGTH U(4)
+#define ID_AA64PFR0_EL0_SHIFT U(0)
+#define ID_AA64PFR0_EL1_SHIFT U(4)
+#define ID_AA64PFR0_EL2_SHIFT U(8)
+#define ID_AA64PFR0_EL3_SHIFT U(12)
+#define ID_AA64PFR0_AMU_SHIFT U(44)
+#define ID_AA64PFR0_AMU_LENGTH U(4)
+#define ID_AA64PFR0_AMU_MASK ULL(0xf)
+#define ID_AA64PFR0_AMU_NOT_SUPPORTED U(0x0)
+#define ID_AA64PFR0_AMU_V1 U(0x1)
+#define ID_AA64PFR0_AMU_V1P1 U(0x2)
+#define ID_AA64PFR0_ELX_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_SHIFT U(32)
+#define ID_AA64PFR0_SVE_WIDTH U(4)
+#define ID_AA64PFR0_SVE_MASK ULL(0xf)
+#define ID_AA64PFR0_SVE_LENGTH U(4)
+#define ID_AA64PFR0_MPAM_SHIFT U(40)
+#define ID_AA64PFR0_MPAM_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_SHIFT U(48)
+#define ID_AA64PFR0_DIT_MASK ULL(0xf)
+#define ID_AA64PFR0_DIT_LENGTH U(4)
+#define ID_AA64PFR0_DIT_SUPPORTED U(1)
+#define ID_AA64PFR0_CSV2_SHIFT U(56)
+#define ID_AA64PFR0_CSV2_MASK ULL(0xf)
+#define ID_AA64PFR0_CSV2_WIDTH U(4)
+#define ID_AA64PFR0_CSV2_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_CSV2_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_CSV2_2_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_FEAT_RME_SHIFT U(52)
+#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf)
+#define ID_AA64PFR0_FEAT_RME_LENGTH U(4)
+#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0)
+#define ID_AA64PFR0_FEAT_RME_V1 U(1)
+#define ID_AA64PFR0_RAS_MASK ULL(0xf)
+#define ID_AA64PFR0_RAS_SHIFT U(28)
+#define ID_AA64PFR0_RAS_WIDTH U(4)
+#define ID_AA64PFR0_RAS_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_RAS_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_RASV1P1_SUPPORTED ULL(0x2)
+#define ID_AA64PFR0_GIC_SHIFT U(24)
+#define ID_AA64PFR0_GIC_WIDTH U(4)
+#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+#define ID_AA64PFR0_GIC_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR0_GICV3_GICV4_SUPPORTED ULL(0x1)
+#define ID_AA64PFR0_GICV4_1_SUPPORTED ULL(0x2)
/* ID_AA64DFR0_EL1.PMS definitions (for ARMv8.2+) */
-#define ID_AA64DFR0_PMS_SHIFT U(32)
-#define ID_AA64DFR0_PMS_LENGTH U(4)
-#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_PMS_SHIFT U(32)
+#define ID_AA64DFR0_PMS_LENGTH U(4)
+#define ID_AA64DFR0_PMS_MASK ULL(0xf)
+#define ID_AA64DFR0_SPE_NOT_SUPPORTED U(0)
+#define ID_AA64DFR0_SPE U(1)
+#define ID_AA64DFR0_SPE_V1P1 U(2)
+#define ID_AA64DFR0_SPE_V1P2 U(3)
+#define ID_AA64DFR0_SPE_V1P3 U(4)
+#define ID_AA64DFR0_SPE_V1P4 U(5)
/* ID_AA64DFR0_EL1.DEBUG definitions */
#define ID_AA64DFR0_DEBUG_SHIFT U(0)
@@ -162,28 +195,90 @@
#define ID_AA64DFR0_V8_2_DEBUG_ARCH_SUPPORTED U(8)
#define ID_AA64DFR0_V8_4_DEBUG_ARCH_SUPPORTED U(9)
+/* ID_AA64DFR0_EL1.HPMN0 definitions */
+#define ID_AA64DFR0_HPMN0_SHIFT U(60)
+#define ID_AA64DFR0_HPMN0_MASK ULL(0xf)
+#define ID_AA64DFR0_HPMN0_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.BRBE definitions */
+#define ID_AA64DFR0_BRBE_SHIFT U(52)
+#define ID_AA64DFR0_BRBE_MASK ULL(0xf)
+#define ID_AA64DFR0_BRBE_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceBuffer definitions */
+#define ID_AA64DFR0_TRACEBUFFER_SHIFT U(44)
+#define ID_AA64DFR0_TRACEBUFFER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEBUFFER_SUPPORTED ULL(1)
+
+/* ID_AA64DFR0_EL1.TraceFilt definitions */
+#define ID_AA64DFR0_TRACEFILT_SHIFT U(40)
+#define ID_AA64DFR0_TRACEFILT_MASK U(0xf)
+#define ID_AA64DFR0_TRACEFILT_SUPPORTED U(1)
+
+/* ID_AA64DFR0_EL1.PMUVer definitions */
+#define ID_AA64DFR0_PMUVER_SHIFT U(8)
+#define ID_AA64DFR0_PMUVER_MASK ULL(0xf)
+#define ID_AA64DFR0_PMUVER_NOT_SUPPORTED ULL(0)
+
+/* ID_AA64DFR0_EL1.TraceVer definitions */
+#define ID_AA64DFR0_TRACEVER_SHIFT U(4)
+#define ID_AA64DFR0_TRACEVER_MASK ULL(0xf)
+#define ID_AA64DFR0_TRACEVER_SUPPORTED ULL(1)
+
#define EL_IMPL_NONE ULL(0)
#define EL_IMPL_A64ONLY ULL(1)
#define EL_IMPL_A64_A32 ULL(2)
-#define ID_AA64PFR0_GIC_SHIFT U(24)
-#define ID_AA64PFR0_GIC_WIDTH U(4)
-#define ID_AA64PFR0_GIC_MASK ULL(0xf)
+/* ID_AA64ISAR0_EL1 definitions */
+#define ID_AA64ISAR0_EL1 S3_0_C0_C6_0
+#define ID_AA64ISAR0_TLB_MASK ULL(0xf)
+#define ID_AA64ISAR0_TLB_SHIFT U(56)
+#define ID_AA64ISAR0_TLB_WIDTH U(4)
+#define ID_AA64ISAR0_TLBIRANGE_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR0_TLB_NOT_SUPPORTED ULL(0)
/* ID_AA64ISAR1_EL1 definitions */
-#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
-#define ID_AA64ISAR1_GPI_SHIFT U(28)
-#define ID_AA64ISAR1_GPI_WIDTH U(4)
-#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
-#define ID_AA64ISAR1_GPA_SHIFT U(24)
-#define ID_AA64ISAR1_GPA_WIDTH U(4)
-#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
-#define ID_AA64ISAR1_API_SHIFT U(8)
-#define ID_AA64ISAR1_API_WIDTH U(4)
-#define ID_AA64ISAR1_API_MASK ULL(0xf)
-#define ID_AA64ISAR1_APA_SHIFT U(4)
-#define ID_AA64ISAR1_APA_WIDTH U(4)
-#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
+#define ID_AA64ISAR1_GPI_SHIFT U(28)
+#define ID_AA64ISAR1_GPI_WIDTH U(4)
+#define ID_AA64ISAR1_GPI_MASK ULL(0xf)
+#define ID_AA64ISAR1_GPA_SHIFT U(24)
+#define ID_AA64ISAR1_GPA_WIDTH U(4)
+#define ID_AA64ISAR1_GPA_MASK ULL(0xf)
+#define ID_AA64ISAR1_API_SHIFT U(8)
+#define ID_AA64ISAR1_API_WIDTH U(4)
+#define ID_AA64ISAR1_API_MASK ULL(0xf)
+#define ID_AA64ISAR1_APA_SHIFT U(4)
+#define ID_AA64ISAR1_APA_WIDTH U(4)
+#define ID_AA64ISAR1_APA_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_MASK ULL(0xf)
+#define ID_AA64ISAR1_SPECRES_SHIFT U(40)
+#define ID_AA64ISAR1_SPECRES_WIDTH U(4)
+#define ID_AA64ISAR1_SPECRES_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_SPECRES_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB_MASK ULL(0xf)
+#define ID_AA64ISAR1_DPB_SHIFT U(0)
+#define ID_AA64ISAR1_DPB_WIDTH U(4)
+#define ID_AA64ISAR1_DPB_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_DPB_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_DPB2_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_MASK ULL(0xf)
+#define ID_AA64ISAR1_LS64_SHIFT U(60)
+#define ID_AA64ISAR1_LS64_WIDTH U(4)
+#define ID_AA64ISAR1_LS64_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64ISAR1_LS64_SUPPORTED ULL(0x1)
+#define ID_AA64ISAR1_LS64_V_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR1_LS64_ACCDATA_SUPPORTED ULL(0x3)
+
+/* ID_AA64ISAR2_EL1 definitions */
+#define ID_AA64ISAR2_EL1 S3_0_C0_C6_2
+#define ID_AA64ISAR2_WFXT_MASK ULL(0xf)
+#define ID_AA64ISAR2_WFXT_SHIFT U(0x0)
+#define ID_AA64ISAR2_WFXT_SUPPORTED ULL(0x2)
+#define ID_AA64ISAR2_GPA3_SHIFT U(8)
+#define ID_AA64ISAR2_GPA3_MASK ULL(0xf)
+#define ID_AA64ISAR2_APA3_SHIFT U(12)
+#define ID_AA64ISAR2_APA3_MASK ULL(0xf)
/* ID_AA64MMFR0_EL1 definitions */
#define ID_AA64MMFR0_EL1_PARANGE_SHIFT U(0)
@@ -209,19 +304,68 @@
#define ID_AA64MMFR0_EL1_FGT_SUPPORTED ULL(0x1)
#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT U(28)
+#define ID_AA64MMFR0_EL1_TGRAN4_WIDTH U(4)
#define ID_AA64MMFR0_EL1_TGRAN4_MASK ULL(0xf)
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_52B_SUPPORTED ULL(0x1)
#define ID_AA64MMFR0_EL1_TGRAN4_NOT_SUPPORTED ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT U(40)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN4_2_52B_SUPPORTED ULL(0x3)
+
#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT U(24)
+#define ID_AA64MMFR0_EL1_TGRAN64_WIDTH U(4)
#define ID_AA64MMFR0_EL1_TGRAN64_MASK ULL(0xf)
#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED ULL(0x0)
#define ID_AA64MMFR0_EL1_TGRAN64_NOT_SUPPORTED ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT U(36)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SUPPORTED ULL(0x2)
+
#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT U(20)
+#define ID_AA64MMFR0_EL1_TGRAN16_WIDTH U(4)
#define ID_AA64MMFR0_EL1_TGRAN16_MASK ULL(0xf)
#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED ULL(0x1)
#define ID_AA64MMFR0_EL1_TGRAN16_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_52B_SUPPORTED ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT U(32)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_WIDTH U(4)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_MASK ULL(0xf)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_AS_1 ULL(0x0)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_NOT_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR0_EL1_TGRAN16_2_52B_SUPPORTED ULL(0x3)
+
+/* ID_AA64MMFR1_EL1 definitions */
+#define ID_AA64MMFR1_EL1_PAN_SHIFT U(20)
+#define ID_AA64MMFR1_EL1_PAN_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_PAN_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_PAN_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_PAN2_SUPPORTED ULL(0x2)
+#define ID_AA64MMFR1_EL1_PAN3_SUPPORTED ULL(0x3)
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_AFP_SHIFT U(44)
+#define ID_AA64MMFR1_EL1_AFP_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_AFP_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_LO_SHIFT U(16)
+#define ID_AA64MMFR1_EL1_LO_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_LO_WIDTH U(4)
+#define ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_LOR_SUPPORTED ULL(0x1)
+
/* ID_AA64MMFR2_EL1 definitions */
#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
@@ -246,10 +390,37 @@
#define ID_AA64PFR1_EL1_MTE_SHIFT U(8)
#define ID_AA64PFR1_EL1_MTE_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT U(28)
+#define ID_AA64PFR1_EL1_RNDR_TRAP_MASK ULL(0xf)
+
+#define ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_RNG_TRAP_NOT_SUPPORTED ULL(0x0)
+
+#define ID_AA64PFR1_CSV2_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_CSV2_FRAC_SHIFT U(32)
+#define ID_AA64PFR1_CSV2_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_CSV2_1P1_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_CSV2_1P2_SUPPORTED ULL(0x2)
+
#define MTE_UNIMPLEMENTED ULL(0)
#define MTE_IMPLEMENTED_EL0 ULL(1) /* MTE is only implemented at EL0 */
#define MTE_IMPLEMENTED_ELX ULL(2) /* MTE is implemented at all ELs */
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_SME_WIDTH ULL(0x4)
+#define ID_AA64PFR1_EL1_SME_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64PFR1_EL1_SME_SUPPORTED ULL(0x1)
+#define ID_AA64PFR1_EL1_SME2_SUPPORTED ULL(0x2)
+
+#define ID_AA64PFR1_RAS_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_RAS_FRAC_SHIFT U(12)
+#define ID_AA64PFR1_RAS_FRAC_WIDTH U(4)
+#define ID_AA64PFR1_RASV1P1_SUPPORTED ULL(0x1)
+
+#define ID_AA64PFR1_MPAM_FRAC_SHIFT U(16)
+#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
+
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
@@ -289,6 +460,7 @@
#define SCTLR_WXN_BIT (ULL(1) << 19)
#define SCTLR_UWXN_BIT (ULL(1) << 20)
#define SCTLR_IESB_BIT (ULL(1) << 21)
+#define SCTLR_SPAN_BIT (ULL(1) << 23)
#define SCTLR_E0E_BIT (ULL(1) << 24)
#define SCTLR_EE_BIT (ULL(1) << 25)
#define SCTLR_UCI_BIT (ULL(1) << 26)
@@ -304,6 +476,16 @@
#define CPACR_EL1_FP_TRAP_ALL U(0x2)
#define CPACR_EL1_FP_TRAP_NONE U(0x3)
+#define CPACR_EL1_ZEN(x) ((x) << 16)
+#define CPACR_EL1_ZEN_TRAP_EL0 U(0x1)
+#define CPACR_EL1_ZEN_TRAP_ALL U(0x2)
+#define CPACR_EL1_ZEN_TRAP_NONE U(0x3)
+
+#define CPACR_EL1_SMEN(x) ((x) << 24)
+#define CPACR_EL1_SMEN_TRAP_EL0 U(0x1)
+#define CPACR_EL1_SMEN_TRAP_ALL U(0x2)
+#define CPACR_EL1_SMEN_TRAP_NONE U(0x3)
+
/* SCR definitions */
#define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5))
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
@@ -350,6 +532,8 @@
#define MDCR_EL2_HPME_BIT (U(1) << 7)
#define MDCR_EL2_TPM_BIT (U(1) << 6)
#define MDCR_EL2_TPMCR_BIT (U(1) << 5)
+#define MDCR_EL2_HPMN_SHIFT U(0)
+#define MDCR_EL2_HPMN_MASK ULL(0x1f)
#define MDCR_EL2_RESET_VAL U(0x0)
/* HSTR_EL2 definitions */
@@ -371,6 +555,7 @@
#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
#define HCR_API_BIT (ULL(1) << 41)
#define HCR_APK_BIT (ULL(1) << 40)
+#define HCR_E2H_BIT (ULL(1) << 34)
#define HCR_TGE_BIT (ULL(1) << 27)
#define HCR_RW_SHIFT U(31)
#define HCR_RW_BIT (ULL(1) << HCR_RW_SHIFT)
@@ -399,19 +584,12 @@
#define EVNTI_SHIFT U(4)
#define EVNTI_MASK U(0xf)
-/* CPTR_EL3 definitions */
-#define TCPAC_BIT (U(1) << 31)
-#define TAM_BIT (U(1) << 30)
-#define TTA_BIT (U(1) << 20)
-#define TFP_BIT (U(1) << 10)
-#define CPTR_EZ_BIT (U(1) << 8)
-#define CPTR_EL3_RESET_VAL U(0x0)
-
/* CPTR_EL2 definitions */
-#define CPTR_EL2_RES1 ((ULL(3) << 12) | (ULL(1) << 9) | (ULL(0xff)))
+#define CPTR_EL2_RES1 ((ULL(1) << 13) | (ULL(1) << 9) | (ULL(0xff)))
#define CPTR_EL2_TCPAC_BIT (ULL(1) << 31)
#define CPTR_EL2_TAM_BIT (ULL(1) << 30)
#define CPTR_EL2_TTA_BIT (ULL(1) << 20)
+#define CPTR_EL2_TSM_BIT (ULL(1) << 12)
#define CPTR_EL2_TFP_BIT (ULL(1) << 10)
#define CPTR_EL2_TZ_BIT (ULL(1) << 8)
#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
@@ -600,6 +778,13 @@
#define MAX_CACHE_LINE_SIZE U(0x800) /* 2KB */
+/*
+ * FPCR definitions
+ */
+#define FPCR_FIZ_BIT (ULL(1) << 0)
+#define FPCR_AH_BIT (ULL(1) << 1)
+#define FPCR_NEP_BIT (ULL(1) << 2)
+
/* Physical timer control register bit fields shifts and masks */
#define CNTP_CTL_ENABLE_SHIFT U(0)
#define CNTP_CTL_IMASK_SHIFT U(1)
@@ -613,6 +798,8 @@
#define ESR_EC_SHIFT U(26)
#define ESR_EC_MASK U(0x3f)
#define ESR_EC_LENGTH U(6)
+#define ESR_ISS_SHIFT U(0x0)
+#define ESR_ISS_MASK U(0x1ffffff)
#define EC_UNKNOWN U(0x0)
#define EC_WFE_WFI U(0x1)
#define EC_AARCH32_CP15_MRC_MCR U(0x3)
@@ -639,6 +826,36 @@
#define EC_AARCH32_FP U(0x28)
#define EC_AARCH64_FP U(0x2c)
#define EC_SERROR U(0x2f)
+/* Data Fault Status code, not all error codes listed */
+#define ISS_DFSC_MASK U(0x3f)
+#define DFSC_L0_ADR_SIZE_FAULT U(0)
+#define DFSC_L0_TRANS_FAULT U(4)
+#define DFSC_L1_TRANS_FAULT U(5)
+#define DFSC_L2_TRANS_FAULT U(6)
+#define DFSC_L3_TRANS_FAULT U(7)
+#define DFSC_NO_WALK_SEA U(0x10)
+#define DFSC_L0_SEA U(0x14)
+#define DFSC_L1_SEA U(0x15)
+#define DFSC_L2_SEA U(0x16)
+#define DFSC_L3_SEA U(0x17)
+#define DFSC_EXT_DABORT U(0x10)
+#define DFSC_GPF_DABORT U(0x28)
+
+/* Instr Fault Status code, not all error codes listed */
+#define ISS_IFSC_MASK U(0x3f)
+#define IFSC_L0_ADR_SIZE_FAULT U(0)
+#define IFSC_L0_TRANS_FAULT U(4)
+#define IFSC_L1_TRANS_FAULT U(5)
+#define IFSC_L2_TRANS_FAULT U(6)
+#define IFSC_L3_TRANS_FAULT U(7)
+#define IFSC_NO_WALK_SEA U(0x10)
+#define IFSC_L0_SEA U(0x24)
+#define IFSC_L1_SEA U(0x25)
+#define IFSC_L2_SEA U(0x26)
+#define IFSC_L3_SEA U(0x27)
+
+/* ISS encoding an exception from HVC or SVC instruction execution */
+#define ISS_HVC_SMC_IMM16_MASK U(0xffff)
/*
* External Abort bit in Instruction and Data Aborts synchronous exception
@@ -647,6 +864,7 @@
#define ESR_ISS_EABORT_EA_BIT U(9)
#define EC_BITS(x) (((x) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+#define ISS_BITS(x) (((x) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
/* Reset bit inside the Reset management register for EL3 (RMR_EL3) */
#define RMR_RESET_REQUEST_SHIFT U(0x1)
@@ -699,6 +917,8 @@
#define PMCR_EL0_DP_BIT (U(1) << 5)
#define PMCR_EL0_X_BIT (U(1) << 4)
#define PMCR_EL0_D_BIT (U(1) << 3)
+#define PMCR_EL0_C_BIT (U(1) << 2)
+#define PMCR_EL0_P_BIT (U(1) << 1)
#define PMCR_EL0_E_BIT (U(1) << 0)
/* PMCNTENSET_EL0 definitions */
@@ -707,20 +927,34 @@
/* PMEVTYPER<n>_EL0 definitions */
#define PMEVTYPER_EL0_P_BIT (U(1) << 31)
+#define PMEVTYPER_EL0_U_BIT (U(1) << 30)
#define PMEVTYPER_EL0_NSK_BIT (U(1) << 29)
+#define PMEVTYPER_EL0_NSU_BIT (U(1) << 28)
#define PMEVTYPER_EL0_NSH_BIT (U(1) << 27)
#define PMEVTYPER_EL0_M_BIT (U(1) << 26)
#define PMEVTYPER_EL0_MT_BIT (U(1) << 25)
#define PMEVTYPER_EL0_SH_BIT (U(1) << 24)
-#define PMEVTYPER_EL0_EVTCOUNT_BITS U(0x000003FF)
+#define PMEVTYPER_EL0_T_BIT (U(1) << 23)
+#define PMEVTYPER_EL0_RLK_BIT (U(1) << 22)
+#define PMEVTYPER_EL0_RLU_BIT (U(1) << 21)
+#define PMEVTYPER_EL0_RLH_BIT (U(1) << 20)
+#define PMEVTYPER_EL0_EVTCOUNT_BITS U(0x0000FFFF)
/* PMCCFILTR_EL0 definitions */
#define PMCCFILTR_EL0_P_BIT (U(1) << 31)
+#define PMCCFILTR_EL0_U_BIT (U(1) << 30)
#define PMCCFILTR_EL0_NSK_BIT (U(1) << 29)
#define PMCCFILTR_EL0_NSH_BIT (U(1) << 27)
#define PMCCFILTR_EL0_M_BIT (U(1) << 26)
-#define PMCCFILTR_EL0_MT_BIT (U(1) << 25)
#define PMCCFILTR_EL0_SH_BIT (U(1) << 24)
+#define PMCCFILTR_EL0_T_BIT (U(1) << 23)
+#define PMCCFILTR_EL0_RLK_BIT (U(1) << 22)
+#define PMCCFILTR_EL0_RLU_BIT (U(1) << 21)
+#define PMCCFILTR_EL0_RLH_BIT (U(1) << 20)
+
+/* PMSELR_EL0 definitions */
+#define PMSELR_EL0_SEL_SHIFT U(0)
+#define PMSELR_EL0_SEL_MASK U(0x1f)
/* PMU event counter ID definitions */
#define PMU_EV_PC_WRITE_RETIRED U(0x000C)
@@ -728,14 +962,54 @@
/*******************************************************************************
* Definitions for system register interface to SVE
******************************************************************************/
-#define ZCR_EL3 S3_6_C1_C2_0
+#define ID_AA64ZFR0_EL1 S3_0_C0_C4_4
+
+/* ZCR_EL2 definitions */
#define ZCR_EL2 S3_4_C1_C2_0
+#define ZCR_EL2_SVE_VL_SHIFT UL(0)
+#define ZCR_EL2_SVE_VL_WIDTH UL(4)
-/* ZCR_EL3 definitions */
-#define ZCR_EL3_LEN_MASK U(0xf)
+/* ZCR_EL1 definitions */
+#define ZCR_EL1 S3_0_C1_C2_0
+#define ZCR_EL1_SVE_VL_SHIFT UL(0)
+#define ZCR_EL1_SVE_VL_WIDTH UL(4)
-/* ZCR_EL2 definitions */
-#define ZCR_EL2_LEN_MASK U(0xf)
+/*******************************************************************************
+ * Definitions for system register interface to SME
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SVCR S3_3_C4_C2_2
+#define TPIDR2_EL0 S3_3_C13_C0_5
+#define SMCR_EL2 S3_4_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SVCR definitions */
+#define SVCR_ZA_BIT (U(1) << 1)
+#define SVCR_SM_BIT (U(1) << 0)
+
+/* SMPRI_EL1 definitions */
+#define SMPRI_EL1_PRIORITY_SHIFT U(0)
+#define SMPRI_EL1_PRIORITY_MASK U(0xf)
+
+/* SMPRIMAP_EL2 definitions */
+/* Register is composed of 16 priority map fields of 4 bits numbered 0-15. */
+#define SMPRIMAP_EL2_MAP_SHIFT(pri) U((pri) * 4)
+#define SMPRIMAP_EL2_MAP_MASK U(0xf)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_WIDTH U(4)
+/*
+ * SMCR_ELX_RAZ_LEN is defined to find the architecturally permitted SVL. This
+ * is a combination of RAZ and LEN bit fields.
+ */
+#define SMCR_ELX_RAZ_LEN_SHIFT UL(0)
+#define SMCR_ELX_RAZ_LEN_WIDTH UL(9)
+#define SMCR_ELX_EZT0_BIT (U(1) << 30)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+#define SMCR_EL2_RESET_VAL (SMCR_ELX_EZT0_BIT | SMCR_ELX_FA64_BIT)
/*******************************************************************************
* Definitions of MAIR encodings for device and normal memory
@@ -796,7 +1070,18 @@
/*******************************************************************************
* Definitions for system register interface to SPE
******************************************************************************/
+#define PMSCR_EL1 S3_0_C9_C9_0
+#define PMSNEVFR_EL1 S3_0_C9_C9_1
+#define PMSICR_EL1 S3_0_C9_C9_2
+#define PMSIRR_EL1 S3_0_C9_C9_3
+#define PMSFCR_EL1 S3_0_C9_C9_4
+#define PMSEVFR_EL1 S3_0_C9_C9_5
+#define PMSLATFR_EL1 S3_0_C9_C9_6
+#define PMSIDR_EL1 S3_0_C9_C9_7
#define PMBLIMITR_EL1 S3_0_C9_C10_0
+#define PMBPTR_EL1 S3_0_C9_C10_1
+#define PMBSR_EL1 S3_0_C9_C10_3
+#define PMSCR_EL2 S3_4_C9_C9_0
/*******************************************************************************
* Definitions for system register interface to MPAM
@@ -952,6 +1237,12 @@
#define ERXPFGCTL_CDEN_BIT (U(1) << 31)
/*******************************************************************************
+ * Armv8.1 Registers - Privileged Access Never Registers
+ ******************************************************************************/
+#define PAN S3_0_C4_C2_3
+#define PAN_BIT BIT(22)
+
+/*******************************************************************************
* Armv8.3 Pointer Authentication Registers
******************************************************************************/
#define APIAKeyLo_EL1 S3_0_C2_C1_0
@@ -998,5 +1289,122 @@
******************************************************************************/
#define CNTPOFF_EL2 S3_4_C14_C0_6
+/*******************************************************************************
+ * Armv9.0 - Trace Buffer Extension System Registers
+ ******************************************************************************/
+#define TRBLIMITR_EL1 S3_0_C9_C11_0
+#define TRBPTR_EL1 S3_0_C9_C11_1
+#define TRBBASER_EL1 S3_0_C9_C11_2
+#define TRBSR_EL1 S3_0_C9_C11_3
+#define TRBMAR_EL1 S3_0_C9_C11_4
+#define TRBTRG_EL1 S3_0_C9_C11_6
+#define TRBIDR_EL1 S3_0_C9_C11_7
+
+/*******************************************************************************
+ * FEAT_BRBE - Branch Record Buffer Extension System Registers
+ ******************************************************************************/
+
+#define BRBCR_EL1 S2_1_C9_C0_0
+#define BRBCR_EL2 S2_4_C9_C0_0
+#define BRBFCR_EL1 S2_1_C9_C0_1
+#define BRBTS_EL1 S2_1_C9_C0_2
+#define BRBINFINJ_EL1 S2_1_C9_C1_0
+#define BRBSRCINJ_EL1 S2_1_C9_C1_1
+#define BRBTGTINJ_EL1 S2_1_C9_C1_2
+#define BRBIDR0_EL1 S2_1_C9_C2_0
+
+/*******************************************************************************
+ * Armv8.4 - Trace Filter System Registers
+ ******************************************************************************/
+#define TRFCR_EL1 S3_0_C1_C2_1
+#define TRFCR_EL2 S3_4_C1_C2_1
+
+/*******************************************************************************
+ * Trace System Registers
+ ******************************************************************************/
+#define TRCAUXCTLR S2_1_C0_C6_0
+#define TRCRSR S2_1_C0_C10_0
+#define TRCCCCTLR S2_1_C0_C14_0
+#define TRCBBCTLR S2_1_C0_C15_0
+#define TRCEXTINSELR0 S2_1_C0_C8_4
+#define TRCEXTINSELR1 S2_1_C0_C9_4
+#define TRCEXTINSELR2 S2_1_C0_C10_4
+#define TRCEXTINSELR3 S2_1_C0_C11_4
+#define TRCCLAIMSET		S2_1_C7_C8_6
+#define TRCCLAIMCLR		S2_1_C7_C9_6
+#define TRCDEVARCH		S2_1_C7_C15_6
+
+/*******************************************************************************
+ * FEAT_HCX - Extended Hypervisor Configuration Register
+ ******************************************************************************/
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_MSCEn_BIT (UL(1) << 11)
+#define HCRX_EL2_MCE2_BIT (UL(1) << 10)
+#define HCRX_EL2_CMOW_BIT (UL(1) << 9)
+#define HCRX_EL2_VFNMI_BIT (UL(1) << 8)
+#define HCRX_EL2_VINMI_BIT (UL(1) << 7)
+#define HCRX_EL2_TALLINT_BIT (UL(1) << 6)
+#define HCRX_EL2_SMPME_BIT (UL(1) << 5)
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+#define HCRX_EL2_INIT_VAL ULL(0x0)
+
+/*******************************************************************************
+ * PFR0_EL1 - Definitions for AArch32 Processor Feature Register 0
+ ******************************************************************************/
+#define ID_PFR0_EL1 S3_0_C0_C1_0
+#define ID_PFR0_EL1_RAS_MASK ULL(0xf)
+#define ID_PFR0_EL1_RAS_SHIFT U(28)
+#define ID_PFR0_EL1_RAS_WIDTH U(4)
+#define ID_PFR0_EL1_RAS_SUPPORTED ULL(0x1)
+#define ID_PFR0_EL1_RASV1P1_SUPPORTED ULL(0x2)
+
+/*******************************************************************************
+ * PFR2_EL1 - Definitions for AArch32 Processor Feature Register 2
+ ******************************************************************************/
+#define ID_PFR2_EL1 S3_0_C0_C3_4
+#define ID_PFR2_EL1_RAS_FRAC_MASK ULL(0xf)
+#define ID_PFR2_EL1_RAS_FRAC_SHIFT U(8)
+#define ID_PFR2_EL1_RAS_FRAC_WIDTH U(4)
+#define ID_PFR2_EL1_RASV1P1_SUPPORTED ULL(0x1)
+
+/*******************************************************************************
+ * FEAT_FGT - Definitions for Fine-Grained Trap registers
+ ******************************************************************************/
+#define HFGITR_EL2_INIT_VAL ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_BRBE_MASK ULL(0x180000000000000)
+#define HFGITR_EL2_FEAT_SPECRES_MASK ULL(0x7000000000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_MASK ULL(0x3fc00000000)
+#define HFGITR_EL2_FEAT_TLBIRANGE_TLBIOS_MASK ULL(0xf000000)
+#define HFGITR_EL2_FEAT_TLBIOS_MASK ULL(0xfc0000)
+#define HFGITR_EL2_FEAT_PAN2_MASK ULL(0x30000)
+#define HFGITR_EL2_FEAT_DPB2_MASK ULL(0x200)
+#define HFGITR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x78fc03f000fdff)
+
+#define HFGRTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGRTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGRTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGRTR_EL2_FEAT_RAS_MASK ULL(0x27f0000000000)
+#define HFGRTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGRTR_EL2_FEAT_GICV3_MASK ULL(0x800000000)
+#define HFGRTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGRTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGRTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGRTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f3f07fe0f)
+
+#define HFGWTR_EL2_INIT_VAL ULL(0xc4000000000000)
+#define HFGWTR_EL2_FEAT_SME_MASK ULL(0xc0000000000000)
+#define HFGWTR_EL2_FEAT_LS64_ACCDATA_MASK ULL(0x4000000000000)
+#define HFGWTR_EL2_FEAT_RAS_MASK ULL(0x23a0000000000)
+#define HFGWTR_EL2_FEAT_RASV1P1_MASK ULL(0x1800000000000)
+#define HFGWTR_EL2_FEAT_GICV3_MASK ULL(0x8000000000)
+#define HFGWTR_EL2_FEAT_CSV2_2_CSV2_1P2_MASK ULL(0xc0000000)
+#define HFGWTR_EL2_FEAT_LOR_MASK ULL(0xf80000)
+#define HFGWTR_EL2_FEAT_PAUTH_MASK ULL(0x1f0)
+#define HFGWTR_EL2_NON_FEAT_DEPENDENT_MASK ULL(0x7f2903380b)
+
#endif /* ARCH_H */
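One worked example of the new CPACR_EL1 field helpers: the sketch below clears the SVE and SME trap controls for EL1/EL0; the read_cpacr_el1()/write_cpacr_el1() accessors are assumed to be provided by the corresponding helpers header.

static void disable_sve_sme_traps_el1(void)
{
	uint64_t cpacr = read_cpacr_el1();

	/* ZEN/SMEN == 0b11: no trapping of SVE/SME instructions at EL1 or EL0. */
	cpacr |= CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE);
	cpacr |= CPACR_EL1_SMEN(CPACR_EL1_SMEN_TRAP_NONE);
	write_cpacr_el1(cpacr);
	isb();
}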
diff --git a/include/lib/aarch64/arch_features.h b/include/lib/aarch64/arch_features.h
index 15eb784a7..b6d0ce768 100644
--- a/include/lib/aarch64/arch_features.h
+++ b/include/lib/aarch64/arch_features.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,8 +8,11 @@
#define ARCH_FEATURES_H
#include <stdbool.h>
+#include <stdint.h>
+#include <arch_features.h>
#include <arch_helpers.h>
+#include <utils_def.h>
static inline bool is_armv7_gentimer_present(void)
{
@@ -17,6 +20,22 @@ static inline bool is_armv7_gentimer_present(void)
return true;
}
+static inline bool is_armv8_1_pan_present(void)
+{
+ u_register_t id_aa64mmfr1_pan =
+ EXTRACT(ID_AA64MMFR1_EL1_PAN, read_id_aa64mmfr1_el1());
+ return (id_aa64mmfr1_pan >= ID_AA64MMFR1_EL1_PAN_SUPPORTED) &&
+ (id_aa64mmfr1_pan <= ID_AA64MMFR1_EL1_PAN3_SUPPORTED);
+}
+
+static inline bool is_armv8_2_pan2_present(void)
+{
+ u_register_t id_aa64mmfr1_pan =
+ EXTRACT(ID_AA64MMFR1_EL1_PAN, read_id_aa64mmfr1_el1());
+ return (id_aa64mmfr1_pan >= ID_AA64MMFR1_EL1_PAN2_SUPPORTED) &&
+ (id_aa64mmfr1_pan <= ID_AA64MMFR1_EL1_PAN3_SUPPORTED);
+}
+
static inline bool is_armv8_2_sve_present(void)
{
return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_SVE_SHIFT) &
@@ -29,31 +48,62 @@ static inline bool is_armv8_2_ttcnp_present(void)
ID_AA64MMFR2_EL1_CNP_MASK) != 0U;
}
+static inline bool is_feat_pacqarma3_present(void)
+{
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_GPA3_MASK << ID_AA64ISAR2_GPA3_SHIFT) |
+ (ID_AA64ISAR2_APA3_MASK << ID_AA64ISAR2_APA3_SHIFT);
+
+ /* If any of the fields is not zero, QARMA3 algorithm is present */
+ return (read_id_aa64isar2_el1() & mask_id_aa64isar2) != 0U;
+}
+
static inline bool is_armv8_3_pauth_present(void)
{
- uint64_t mask = (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
- (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) |
- (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
- (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
+ (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT) |
+ (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
- /* If any of the fields is not zero, PAuth is present */
- return (read_id_aa64isar1_el1() & mask) != 0U;
+ /*
+ * If any of the fields is not zero or QARMA3 is present,
+ * PAuth is present.
+ */
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) != 0U ||
+ is_feat_pacqarma3_present());
}
-static inline bool is_armv8_3_pauth_apa_api_present(void)
+static inline bool is_armv8_3_pauth_apa_api_apa3_present(void)
{
- uint64_t mask = (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
- (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_API_MASK << ID_AA64ISAR1_API_SHIFT) |
+ (ID_AA64ISAR1_APA_MASK << ID_AA64ISAR1_APA_SHIFT);
+
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_APA3_MASK << ID_AA64ISAR2_APA3_SHIFT);
- return (read_id_aa64isar1_el1() & mask) != 0U;
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) |
+ (read_id_aa64isar2_el1() & mask_id_aa64isar2)) != 0U;
}
-static inline bool is_armv8_3_pauth_gpa_gpi_present(void)
+static inline bool is_armv8_3_pauth_gpa_gpi_gpa3_present(void)
{
- uint64_t mask = (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
- (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT);
+ uint64_t mask_id_aa64isar1 =
+ (ID_AA64ISAR1_GPI_MASK << ID_AA64ISAR1_GPI_SHIFT) |
+ (ID_AA64ISAR1_GPA_MASK << ID_AA64ISAR1_GPA_SHIFT);
+
+ uint64_t mask_id_aa64isar2 =
+ (ID_AA64ISAR2_GPA3_MASK << ID_AA64ISAR2_GPA3_SHIFT);
- return (read_id_aa64isar1_el1() & mask) != 0U;
+ return ((read_id_aa64isar1_el1() & mask_id_aa64isar1) |
+ (read_id_aa64isar2_el1() & mask_id_aa64isar2)) != 0U;
+}
+
+static inline bool is_armv8_4_dit_present(void)
+{
+ return ((read_id_aa64pfr0_el1() >> ID_AA64PFR0_DIT_SHIFT) &
+ ID_AA64PFR0_DIT_MASK) == 1U;
}
static inline bool is_armv8_4_ttst_present(void)
@@ -86,10 +136,287 @@ static inline unsigned long int get_armv8_6_ecv_support(void)
ID_AA64MMFR0_EL1_ECV_MASK);
}
+static inline unsigned long int get_pa_range(void)
+{
+ return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_PARANGE_SHIFT) &
+ ID_AA64MMFR0_EL1_PARANGE_MASK);
+}
+
static inline uint32_t arch_get_debug_version(void)
{
return ((read_id_aa64dfr0_el1() & ID_AA64DFR0_DEBUG_BITS) >>
ID_AA64DFR0_DEBUG_SHIFT);
}
+static inline bool get_armv9_0_trbe_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEBUFFER_SHIFT) &
+ ID_AA64DFR0_TRACEBUFFER_MASK) ==
+ ID_AA64DFR0_TRACEBUFFER_SUPPORTED;
+}
+
+static inline bool get_armv8_4_trf_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEFILT_SHIFT) &
+ ID_AA64DFR0_TRACEFILT_MASK) ==
+ ID_AA64DFR0_TRACEFILT_SUPPORTED;
+}
+
+static inline bool get_armv8_0_sys_reg_trace_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_TRACEVER_SHIFT) &
+ ID_AA64DFR0_TRACEVER_MASK) ==
+ ID_AA64DFR0_TRACEVER_SUPPORTED;
+}
+
+static inline unsigned int get_armv9_2_feat_rme_support(void)
+{
+ /*
+	 * Return the RME version, or zero if RME is not supported. The result
+	 * can therefore be used either as the RME version number or compared
+	 * against zero to detect whether RME is present.
+ */
+ return (unsigned int)(read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK;
+}
+
+static inline bool get_feat_hcx_support(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_HCX_SHIFT) &
+ ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED);
+}
+
+static inline bool get_feat_afp_present(void)
+{
+ return (((read_id_aa64mmfr1_el1() >> ID_AA64MMFR1_EL1_AFP_SHIFT) &
+ ID_AA64MMFR1_EL1_AFP_MASK) == ID_AA64MMFR1_EL1_AFP_SUPPORTED);
+}
+
+static inline bool get_feat_brbe_support(void)
+{
+ return ((read_id_aa64dfr0_el1() >> ID_AA64DFR0_BRBE_SHIFT) &
+ ID_AA64DFR0_BRBE_MASK) ==
+ ID_AA64DFR0_BRBE_SUPPORTED;
+}
+
+static inline bool get_feat_wfxt_present(void)
+{
+ return (((read_id_aa64isar2_el1() >> ID_AA64ISAR2_WFXT_SHIFT) &
+ ID_AA64ISAR2_WFXT_MASK) == ID_AA64ISAR2_WFXT_SUPPORTED);
+}
+
+static inline bool is_feat_rng_trap_present(void)
+{
+ return (((read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_RNDR_TRAP_SHIFT) &
+ ID_AA64PFR1_EL1_RNDR_TRAP_MASK)
+ == ID_AA64PFR1_EL1_RNG_TRAP_SUPPORTED);
+}
+
+static inline bool is_feat_mpam_supported(void)
+{
+ /*
+	 * If the MPAM version retrieved from the Processor Feature registers
+ * is a non-zero value, then MPAM is supported.
+ */
+
+ return (((((read_id_aa64pfr0_el1() >>
+ ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
+ ((read_id_aa64pfr1_el1() >>
+ ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK)) != 0U);
+}
+
+static inline unsigned int spe_get_version(void)
+{
+ return (unsigned int)((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMS_SHIFT) &
+ ID_AA64DFR0_PMS_MASK);
+}
+
+static inline bool get_feat_pmuv3_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_PMUVER_SHIFT) &
+ ID_AA64DFR0_PMUVER_MASK) != ID_AA64DFR0_PMUVER_NOT_SUPPORTED);
+}
+
+static inline bool get_feat_hpmn0_supported(void)
+{
+ return (((read_id_aa64dfr0_el1() >> ID_AA64DFR0_HPMN0_SHIFT) &
+ ID_AA64DFR0_HPMN0_MASK) == ID_AA64DFR0_HPMN0_SUPPORTED);
+}
+
+static inline bool is_feat_sme_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
+ return (features & ID_AA64PFR1_EL1_SME_MASK) >= ID_AA64PFR1_EL1_SME_SUPPORTED;
+}
+
+static inline bool is_feat_sme_fa64_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64smfr0_el1();
+ return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U;
+}
+
+static inline bool is_feat_sme2_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
+ return (features & ID_AA64PFR1_EL1_SME_MASK) >= ID_AA64PFR1_EL1_SME2_SUPPORTED;
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran4(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN4, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran4_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN4_2, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran16(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN16, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran16_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN16_2, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran64(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN64, read_id_aa64mmfr0_el1());
+}
+
+static inline u_register_t get_id_aa64mmfr0_el0_tgran64_2(void)
+{
+ return EXTRACT(ID_AA64MMFR0_EL1_TGRAN64_2, read_id_aa64mmfr0_el1());
+}
+
+static inline bool is_feat_52b_on_4k_supported(void)
+{
+ return (get_id_aa64mmfr0_el0_tgran4() ==
+ ID_AA64MMFR0_EL1_TGRAN4_52B_SUPPORTED);
+}
+
+static inline bool is_feat_52b_on_4k_2_supported(void)
+{
+ u_register_t tgran4_2 = get_id_aa64mmfr0_el0_tgran4_2();
+
+ return ((tgran4_2 == ID_AA64MMFR0_EL1_TGRAN4_2_52B_SUPPORTED) ||
+ ((tgran4_2 == ID_AA64MMFR0_EL1_TGRAN4_2_AS_1)
+ && (is_feat_52b_on_4k_supported() == true)));
+}
+
+static inline bool is_feat_specres_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_SPECRES, read_id_aa64isar1_el1())
+ == ID_AA64ISAR1_SPECRES_SUPPORTED;
+}
+
+static inline bool is_feat_tlbirange_present(void)
+{
+ return EXTRACT(ID_AA64ISAR0_TLB, read_id_aa64isar0_el1())
+ == ID_AA64ISAR0_TLBIRANGE_SUPPORTED;
+}
+
+static inline bool is_feat_tlbios_present(void)
+{
+ return EXTRACT(ID_AA64ISAR0_TLB, read_id_aa64isar0_el1())
+ != ID_AA64ISAR0_TLB_NOT_SUPPORTED;
+}
+
+static inline bool is_feat_dpb_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_DPB, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_DPB_SUPPORTED;
+}
+
+static inline bool is_feat_dpb2_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_DPB, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_DPB2_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_v_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_V_SUPPORTED;
+}
+
+static inline bool is_feat_ls64_accdata_present(void)
+{
+ return EXTRACT(ID_AA64ISAR1_LS64, read_id_aa64isar1_el1())
+ >= ID_AA64ISAR1_LS64_ACCDATA_SUPPORTED;
+}
+
+static inline bool is_feat_ras_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_RAS, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_RAS_SUPPORTED;
+}
+
+static inline bool is_feat_rasv1p1_present(void)
+{
+ return (EXTRACT(ID_AA64PFR0_RAS, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_RASV1P1_SUPPORTED)
+ || (is_feat_ras_present() &&
+ (EXTRACT(ID_AA64PFR1_RAS_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_RASV1P1_SUPPORTED))
+ || (EXTRACT(ID_PFR0_EL1_RAS, read_id_pfr0_el1())
+ == ID_PFR0_EL1_RASV1P1_SUPPORTED)
+ || ((EXTRACT(ID_PFR0_EL1_RAS, read_id_pfr0_el1())
+ == ID_PFR0_EL1_RAS_SUPPORTED) &&
+ (EXTRACT(ID_PFR2_EL1_RAS_FRAC, read_id_pfr2_el1())
+ == ID_PFR2_EL1_RASV1P1_SUPPORTED));
+}
+
+static inline bool is_feat_gicv3_gicv4_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_GIC, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_GICV3_GICV4_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_CSV2, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_CSV2_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_2_present(void)
+{
+ return EXTRACT(ID_AA64PFR0_CSV2, read_id_aa64pfr0_el1())
+ == ID_AA64PFR0_CSV2_2_SUPPORTED;
+}
+
+static inline bool is_feat_csv2_1p1_present(void)
+{
+ return is_feat_csv2_present() &&
+ (EXTRACT(ID_AA64PFR1_CSV2_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_CSV2_1P1_SUPPORTED);
+}
+
+static inline bool is_feat_csv2_1p2_present(void)
+{
+ return is_feat_csv2_present() &&
+ (EXTRACT(ID_AA64PFR1_CSV2_FRAC, read_id_aa64pfr1_el1())
+ == ID_AA64PFR1_CSV2_1P2_SUPPORTED);
+}
+
+static inline bool is_feat_lor_present(void)
+{
+ return EXTRACT(ID_AA64MMFR1_EL1_LO, read_id_aa64mmfr1_el1())
+ != ID_AA64MMFR1_EL1_LOR_NOT_SUPPORTED;
+}
+
#endif /* ARCH_FEATURES_H */
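A hedged example of how a test might gate on these feature predicates; test_result_t, TEST_RESULT_SKIPPED and TEST_RESULT_SUCCESS are assumed from the TFTF test framework, and only is_feat_sme_supported() comes from this header.

test_result_t test_sme_smoke(void)
{
	if (!is_feat_sme_supported()) {
		return TEST_RESULT_SKIPPED;
	}

	/* ... exercise SME state save/restore here ... */
	return TEST_RESULT_SUCCESS;
}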
diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h
index 39f1e3b87..4b9c33e31 100644
--- a/include/lib/aarch64/arch_helpers.h
+++ b/include/lib/aarch64/arch_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -75,6 +75,13 @@ static inline void _op ## _type(void) \
__asm__ (#_op " " #_type); \
}
+/* Define function for system instruction with register with variable parameter */
+#define DEFINE_SYSOP_PARAM_FUNC(_op) \
+static inline void _op(uint64_t v) \
+{ \
+ __asm__ (#_op " " "%0" : : "r" (v)); \
+}
+
/* Define function for system instruction with register parameter */
#define DEFINE_SYSOP_TYPE_PARAM_FUNC(_op, _type) \
static inline void _op ## _type(uint64_t v) \
@@ -179,14 +186,18 @@ void disable_mmu_icache(void);
DEFINE_SYSREG_RW_FUNCS(par_el1)
DEFINE_SYSREG_READ_FUNC(id_pfr1_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64isar0_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64isar1_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64pfr0_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64pfr1_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64dfr0_el1)
DEFINE_SYSREG_READ_FUNC(id_afr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_pfr0_el1)
DEFINE_SYSREG_READ_FUNC(CurrentEl)
DEFINE_SYSREG_READ_FUNC(ctr_el0)
DEFINE_SYSREG_RW_FUNCS(daif)
+DEFINE_SYSREG_RW_FUNCS(nzcv)
+DEFINE_SYSREG_READ_FUNC(spsel)
DEFINE_SYSREG_RW_FUNCS(spsr_el1)
DEFINE_SYSREG_RW_FUNCS(spsr_el2)
DEFINE_SYSREG_RW_FUNCS(spsr_el3)
@@ -197,6 +208,7 @@ DEFINE_SYSREG_RW_FUNCS(elr_el3)
DEFINE_SYSOP_FUNC(wfi)
DEFINE_SYSOP_FUNC(wfe)
DEFINE_SYSOP_FUNC(sev)
+DEFINE_SYSOP_FUNC(sevl)
DEFINE_SYSOP_TYPE_FUNC(dsb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, sy)
DEFINE_SYSOP_TYPE_FUNC(dmb, st)
@@ -215,6 +227,9 @@ DEFINE_SYSOP_TYPE_FUNC(dmb, ishst)
DEFINE_SYSOP_TYPE_FUNC(dmb, ish)
DEFINE_SYSOP_FUNC(isb)
+DEFINE_SYSOP_PARAM_FUNC(wfit)
+DEFINE_SYSOP_PARAM_FUNC(wfet)
+
static inline void enable_irq(void)
{
/*
@@ -292,6 +307,7 @@ void __dead2 smc(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3,
DEFINE_SYSREG_READ_FUNC(midr_el1)
DEFINE_SYSREG_READ_FUNC(mpidr_el1)
DEFINE_SYSREG_READ_FUNC(id_aa64mmfr0_el1)
+DEFINE_SYSREG_READ_FUNC(id_aa64mmfr1_el1)
DEFINE_SYSREG_RW_FUNCS(scr_el3)
DEFINE_SYSREG_RW_FUNCS(hcr_el2)
@@ -383,6 +399,8 @@ DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
#define clr_cntp_ctl_enable(x) ((x) &= ~(U(1) << CNTP_CTL_ENABLE_SHIFT))
#define clr_cntp_ctl_imask(x) ((x) &= ~(U(1) << CNTP_CTL_IMASK_SHIFT))
+#define read_midr() read_midr_el1()
+
DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
DEFINE_SYSREG_RW_FUNCS(cntvoff_el2)
@@ -395,16 +413,53 @@ DEFINE_SYSREG_READ_FUNC(isr_el1)
DEFINE_SYSREG_RW_FUNCS(mdcr_el2)
DEFINE_SYSREG_RW_FUNCS(mdcr_el3)
DEFINE_SYSREG_RW_FUNCS(hstr_el2)
+
DEFINE_SYSREG_RW_FUNCS(pmcr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmcntenclr_el0)
DEFINE_SYSREG_RW_FUNCS(pmcntenset_el0)
-DEFINE_SYSREG_READ_FUNC(pmccntr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmccntr_el0)
DEFINE_SYSREG_RW_FUNCS(pmccfiltr_el0)
-
DEFINE_SYSREG_RW_FUNCS(pmevtyper0_el0)
-DEFINE_SYSREG_READ_FUNC(pmevcntr0_el0)
+DEFINE_SYSREG_RW_FUNCS(pmevcntr0_el0)
+DEFINE_SYSREG_RW_FUNCS(pmovsclr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmovsset_el0)
+DEFINE_SYSREG_RW_FUNCS(pmselr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmuserenr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevtyper_el0)
+DEFINE_SYSREG_RW_FUNCS(pmxevcntr_el0)
+DEFINE_SYSREG_RW_FUNCS(pmintenclr_el1)
+DEFINE_SYSREG_RW_FUNCS(pmintenset_el1)
+
+/* parameterised event counter accessors */
+static inline u_register_t read_pmevcntrn_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevcntr_el0();
+}
-/* GICv3 System Registers */
+static inline void write_pmevcntrn_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevcntr_el0(val);
+}
+
+static inline u_register_t read_pmevtypern_el0(int ctr_num)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ return read_pmxevtyper_el0();
+}
+
+static inline void write_pmevtypern_el0(int ctr_num, u_register_t val)
+{
+ write_pmselr_el0(ctr_num & PMSELR_EL0_SEL_MASK);
+ write_pmxevtyper_el0(val);
+}
+
+/* Armv8.5 FEAT_RNG Registers */
+DEFINE_SYSREG_READ_FUNC(rndr)
+DEFINE_SYSREG_READ_FUNC(rndrrs)
+
+/* GICv3 System Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el2, ICC_SRE_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el3, ICC_SRE_EL3)
@@ -422,6 +477,12 @@ DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_eoir1_el1, ICC_EOIR1_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(icc_sgi0r_el1, ICC_SGI0R_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sgi1r, ICC_SGI1R)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_ctrl_el1, ICV_CTRL_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(icv_iar1_el1, ICV_IAR1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_igrpen1_el1, ICV_IGRPEN1_EL1)
+DEFINE_RENAME_SYSREG_WRITE_FUNC(icv_eoir1_el1, ICV_EOIR1_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(icv_pmr_el1, ICV_PMR_EL1)
+
DEFINE_RENAME_SYSREG_RW_FUNCS(amcr_el0, AMCR_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcgcr_el0, AMCGCR_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(amcfgr_el0, AMCFGR_EL0)
@@ -436,10 +497,29 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
+/* Static profiling control registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmscr_el1, PMSCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsevfr_el1, PMSEVFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsfcr_el1, PMSFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsicr_el1, PMSICR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsidr_el1, PMSIDR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsirr_el1, PMSIRR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmslatfr_el1, PMSLATFR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmsnevfr_el1, PMSNEVFR_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmbptr_el1, PMBPTR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmbsr_el1, PMBSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(pmscr_el2, PMSCR_EL2)
+
+/* Definitions for system register interface to SVE */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64zfr0_el1, ID_AA64ZFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(zcr_el2, ZCR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(zcr_el1, ZCR_EL1)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
-DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(svcr, SVCR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(tpidr2_el0, TPIDR2_EL0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el2, SMCR_EL2)
DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
@@ -451,6 +531,9 @@ DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
+/* Armv8.1 Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(pan, PAN)
+
/* Armv8.2 Registers */
DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
@@ -479,6 +562,9 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(tfsr_el1, TFSR_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(rgsr_el1, RGSR_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(gcr_el1, GCR_EL1)
+/* Armv8.4 Data Independent Timing */
+DEFINE_RENAME_SYSREG_RW_FUNCS(dit, DIT)
+
/* Armv8.6 Fine Grained Virtualization Traps Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(hfgrtr_el2, HFGRTR_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(hfgwtr_el2, HFGWTR_EL2)
@@ -489,6 +575,55 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(hdfgwtr_el2, HDFGWTR_EL2)
/* Armv8.6 Enhanced Counter Virtualization Register */
DEFINE_RENAME_SYSREG_RW_FUNCS(cntpoff_el2, CNTPOFF_EL2)
+/* Armv9.0 Trace buffer extension System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trblimitr_el1, TRBLIMITR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbptr_el1, TRBPTR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbbaser_el1, TRBBASER_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbsr_el1, TRBSR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbmar_el1, TRBMAR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trbtrg_el1, TRBTRG_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(trbidr_el1, TRBIDR_EL1)
+
+/* FEAT_BRBE Branch record buffer extension system registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbcr_el1, BRBCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbcr_el2, BRBCR_EL2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbfcr_el1, BRBFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbts_el1, BRBTS_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbinfinj_el1, BRBINFINJ_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbsrcinj_el1, BRBSRCINJ_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(brbtgtinj_el1, BRBTGTINJ_EL1)
+DEFINE_RENAME_SYSREG_READ_FUNC(brbidr0_el1, BRBIDR0_EL1)
+
+/* Armv8.4 Trace filter control System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trfcr_el1, TRFCR_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trfcr_el2, TRFCR_EL2)
+
+/* Trace System Registers */
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcauxctlr, TRCAUXCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcrsr, TRCRSR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcbbctlr, TRCBBCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcccctlr, TRCCCCTLR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr0, TRCEXTINSELR0)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr1, TRCEXTINSELR1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr2, TRCEXTINSELR2)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcextinselr3, TRCEXTINSELR3)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcclaimset, TRCCLAIMSET)
+DEFINE_RENAME_SYSREG_RW_FUNCS(trcclaimclr, TRCCLAIMCLR)
+DEFINE_RENAME_SYSREG_READ_FUNC(trcdevarch, TRCDEVARCH)
+
+/* FEAT_HCX HCRX_EL2 */
+DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2)
+
+/* Floating point control and status register */
+DEFINE_RENAME_SYSREG_RW_FUNCS(fpcr, FPCR)
+DEFINE_RENAME_SYSREG_RW_FUNCS(fpsr, FPSR)
+
+/* ID_AA64ISAR2_EL1 */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64isar2_el1, ID_AA64ISAR2_EL1)
+
+/* ID_PFR2_EL1 */
+DEFINE_RENAME_SYSREG_READ_FUNC(id_pfr2_el1, ID_PFR2_EL1)
+
#define IS_IN_EL(x) \
(GET_EL(read_CurrentEl()) == MODE_EL##x)
@@ -527,4 +662,16 @@ static inline uint64_t syscounter_read(void)
return read_cntpct_el0();
}
+/* Read the value of the Counter-timer virtual count. */
+static inline uint64_t virtualcounter_read(void)
+{
+ /*
+ * The instruction barrier is needed to guarantee that we read an
+ * accurate value. Otherwise, the CPU might speculatively read it and
+ * return a stale value.
+ */
+ isb();
+ return read_cntvct_el0();
+}
+
#endif /* ARCH_HELPERS_H */
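To illustrate the PMSELR_EL0 indirection behind the parameterised event counter accessors added above, here is a sketch that binds counter 0 to an event and reads it back; enabling the PMU (PMCR_EL0.E, PMCNTENSET_EL0) is elided and left as an assumption of the surrounding test.

static u_register_t count_retired_pc_writes(void)
{
	/* Select event counter 0 and bind it to the PC-write-retired event. */
	write_pmevtypern_el0(0, PMU_EV_PC_WRITE_RETIRED);
	write_pmevcntrn_el0(0, 0);

	/* ... enable the PMU and run the code under measurement ... */

	return read_pmevcntrn_el0(0);
}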
diff --git a/include/lib/aarch64/serror.h b/include/lib/aarch64/serror.h
new file mode 100644
index 000000000..ac25f8702
--- /dev/null
+++ b/include/lib/aarch64/serror.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SERROR_H__
+#define __SERROR_H__
+
+typedef bool (*exception_handler_t)(void);
+void register_custom_serror_handler(exception_handler_t handler);
+void unregister_custom_serror_handler(void);
+
+#endif /* __SERROR_H__ */
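A minimal usage sketch of the new SError hook, assuming the flattened include path and that returning true from the handler reports the exception as handled:

#include <stdbool.h>
#include <serror.h>

static volatile bool serror_seen;

static bool expect_serror(void)
{
	serror_seen = true;
	return true;	/* assumed: true tells the framework the SError was handled */
}

static void run_with_serror_expected(void (*action)(void))
{
	serror_seen = false;
	register_custom_serror_handler(expect_serror);
	action();	/* action expected to trigger the SError */
	unregister_custom_serror_handler();
}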
diff --git a/include/lib/aarch64/sync.h b/include/lib/aarch64/sync.h
new file mode 100644
index 000000000..5058980c8
--- /dev/null
+++ b/include/lib/aarch64/sync.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __SYNC_H__
+#define __SYNC_H__
+
+typedef bool (*exception_handler_t)(void);
+void register_custom_sync_exception_handler(exception_handler_t handler);
+void unregister_custom_sync_exception_handler(void);
+
+#endif /* __SYNC_H__ */
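The synchronous-exception hook follows the same pattern; a brief sketch under the same assumptions:

#include <stdbool.h>
#include <sync.h>

static bool expected_fault_handler(void)
{
	return true;	/* assumed: returning true reports the exception as handled */
}

static void probe_faulting_instruction(void (*probe)(void))
{
	register_custom_sync_exception_handler(expected_fault_handler);
	probe();	/* e.g. an instruction that traps while a feature is disabled */
	unregister_custom_sync_exception_handler();
}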
diff --git a/include/lib/extensions/fpu.h b/include/lib/extensions/fpu.h
new file mode 100644
index 000000000..1a828182e
--- /dev/null
+++ b/include/lib/extensions/fpu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef FPU_H
+#define FPU_H
+
+/* The FPU and SIMD register bank consists of 32 quadword (128-bit) Q registers. */
+#define FPU_Q_SIZE 16U
+#define FPU_Q_COUNT 32U
+
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stdint.h>
+
+typedef uint8_t fpu_q_reg_t[FPU_Q_SIZE] __aligned(16);
+typedef struct fpu_cs_regs {
+ unsigned long fpcr;
+ unsigned long fpsr;
+} fpu_cs_regs_t __aligned(16);
+
+typedef struct fpu_state {
+ fpu_q_reg_t q_regs[FPU_Q_COUNT];
+ fpu_cs_regs_t cs_regs;
+} fpu_state_t __aligned(16);
+
+void fpu_cs_regs_write(const fpu_cs_regs_t *cs_regs);
+void fpu_cs_regs_write_rand(fpu_cs_regs_t *cs_regs);
+void fpu_cs_regs_read(fpu_cs_regs_t *cs_regs);
+int fpu_cs_regs_compare(const fpu_cs_regs_t *s1, const fpu_cs_regs_t *s2);
+
+void fpu_q_regs_write_rand(fpu_q_reg_t q_regs[FPU_Q_COUNT]);
+void fpu_q_regs_read(fpu_q_reg_t q_regs[FPU_Q_COUNT]);
+int fpu_q_regs_compare(const fpu_q_reg_t s1[FPU_Q_COUNT],
+ const fpu_q_reg_t s2[FPU_Q_COUNT]);
+
+void fpu_state_write_rand(fpu_state_t *fpu_state);
+void fpu_state_read(fpu_state_t *fpu_state);
+int fpu_state_compare(const fpu_state_t *s1, const fpu_state_t *s2);
+
+#endif /* __ASSEMBLER__ */
+#endif /* FPU_H */
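A sketch of how the FPU helpers are meant to be combined in a save/restore check, assuming fpu_state_compare() returns zero when the states match:

#include <stdbool.h>
#include <lib/extensions/fpu.h>

static bool fpu_state_survives(bool (*world_switch)(void))
{
	static fpu_state_t before, after;

	fpu_state_write_rand(&before);	/* fill Q0-Q31, FPCR and FPSR with random data */
	if (!world_switch()) {
		return false;
	}
	fpu_state_read(&after);

	return fpu_state_compare(&before, &after) == 0;	/* assumed: 0 means identical */
}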
diff --git a/include/lib/extensions/pauth.h b/include/lib/extensions/pauth.h
index d072f5c57..8816e1869 100644
--- a/include/lib/extensions/pauth.h
+++ b/include/lib/extensions/pauth.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,9 +7,15 @@
#ifndef PAUTH_H
#define PAUTH_H
+#include <stdbool.h>
#include <stdint.h>
#ifdef __aarch64__
+/* Number of ARMv8.3-PAuth keys */
+#define NUM_KEYS 5U
+
+static const char * const key_name[] = {"IA", "IB", "DA", "DB", "GA"};
+
/* Initialize 128-bit ARMv8.3-PAuth key */
uint128_t init_apkey(void);
@@ -18,6 +24,22 @@ void pauth_init_enable(void);
/* Disable ARMv8.3-PAuth */
void pauth_disable(void);
+
+/*
+ * Fill the PAuth keys and the template with random values if the keys were
+ * not initialized earlier; otherwise copy the PAuth key registers into the
+ * template.
+ */
+void pauth_test_lib_fill_regs_and_template(uint128_t *pauth_keys_arr);
+
+/* Read and Compare PAuth registers with provided template values. */
+bool pauth_test_lib_compare_template(uint128_t *pauth_keys_before, uint128_t *pauth_keys_after);
+
+/* Read and Store PAuth registers in template. */
+void pauth_test_lib_read_keys(uint128_t *pauth_keys_arr);
+
+/* Test PAuth instructions. */
+void pauth_test_lib_test_intrs(void);
+
#endif /* __aarch64__ */
#endif /* PAUTH_H */
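A sketch of how the new PAuth test helpers fit together, assuming the compare helper returns true when the key registers still match the captured template:

#include <stdbool.h>
#include <lib/extensions/pauth.h>

static bool pauth_keys_preserved(bool (*world_switch)(void))
{
	uint128_t keys_before[NUM_KEYS];
	uint128_t keys_after[NUM_KEYS];

	/* Fill the key registers and the template with random values. */
	pauth_test_lib_fill_regs_and_template(keys_before);

	if (!world_switch()) {
		return false;
	}

	/*
	 * Read the current key registers into keys_after and compare them
	 * against the template captured in keys_before.
	 */
	return pauth_test_lib_compare_template(keys_before, keys_after);
}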
diff --git a/include/lib/extensions/sme.h b/include/lib/extensions/sme.h
new file mode 100644
index 000000000..4a7e9b74a
--- /dev/null
+++ b/include/lib/extensions/sme.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SME_H
+#define SME_H
+
+#include <stdlib.h> /* for rand() */
+
+#define MAX_VL (512)
+#define MAX_VL_B (MAX_VL / 8)
+#define SME_SVQ_ARCH_MAX (MASK(SMCR_ELX_LEN) >> SMCR_ELX_LEN_SHIFT)
+
+/* Get a random Streaming SVE VQ between 0 and SME_SVQ_ARCH_MAX. */
+#define SME_GET_RANDOM_SVQ (rand() % (SME_SVQ_ARCH_MAX + 1))
+
+typedef enum {
+ SMSTART, /* enters streaming sve mode and enables SME ZA array */
+ SMSTART_SM, /* enters streaming sve mode only */
+ SMSTART_ZA, /* enables SME ZA array storage only */
+} smestart_instruction_type_t;
+
+typedef enum {
+ SMSTOP, /* exits streaming sve mode, & disables SME ZA array */
+ SMSTOP_SM, /* exits streaming sve mode only */
+ SMSTOP_ZA, /* disables SME ZA array storage only */
+} smestop_instruction_type_t;
+
+/* SME feature related prototypes. */
+void sme_smstart(smestart_instruction_type_t smstart_type);
+void sme_smstop(smestop_instruction_type_t smstop_type);
+
+/* Assembly function prototypes. */
+uint64_t sme_rdsvl_1(void);
+void sme_try_illegal_instruction(void);
+void sme_vector_to_ZA(const uint64_t *input_vector);
+void sme_ZA_to_vector(const uint64_t *output_vector);
+void sme2_load_zt0_instruction(const uint64_t *inputbuf);
+void sme2_store_zt0_instruction(const uint64_t *outputbuf);
+void sme_config_svq(uint32_t svq);
+void sme_enable_fa64(void);
+void sme_disable_fa64(void);
+bool sme_smstat_sm(void);
+bool sme_feat_fa64_enabled(void);
+
+#endif /* SME_H */
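A sketch of the intended flow for the streaming-SVE helpers, assuming sme_smstat_sm() reports true while the PE is in Streaming SVE mode:

#include <stdbool.h>
#include <lib/extensions/sme.h>

static bool sme_streaming_round_trip(void)
{
	/* Pick a random streaming vector quantum and program SMCR_ELx.LEN. */
	sme_config_svq((uint32_t)SME_GET_RANDOM_SVQ);

	sme_smstart(SMSTART_SM);	/* enter Streaming SVE mode only */
	if (!sme_smstat_sm()) {
		return false;
	}

	sme_smstop(SMSTOP_SM);		/* exit Streaming SVE mode */
	return !sme_smstat_sm();
}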
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
new file mode 100644
index 000000000..b2cd2a6d5
--- /dev/null
+++ b/include/lib/extensions/sve.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SVE_H
+#define SVE_H
+
+#include <arch.h>
+#include <stdlib.h> /* for rand() */
+#include <lib/extensions/sme.h>
+
+#define fill_sve_helper(num) "ldr z"#num", [%0, #"#num", MUL VL];"
+#define read_sve_helper(num) "str z"#num", [%0, #"#num", MUL VL];"
+
+#define fill_sve_p_helper(num) "ldr p"#num", [%0, #"#num", MUL VL];"
+#define read_sve_p_helper(num) "str p"#num", [%0, #"#num", MUL VL];"
+
+/*
+ * Max. vector length permitted by the architecture:
+ * SVE: 2048 bits = 256 bytes
+ */
+#define SVE_VECTOR_LEN_BYTES (256U)
+#define SVE_NUM_VECTORS (32U)
+
+/* Max size of one predicate register is 1/8 of Z register */
+#define SVE_P_REG_LEN_BYTES (SVE_VECTOR_LEN_BYTES / 8U)
+#define SVE_NUM_P_REGS (16U)
+
+/* Max size of one FFR register is 1/8 of Z register */
+#define SVE_FFR_REG_LEN_BYTES (SVE_VECTOR_LEN_BYTES / 8U)
+#define SVE_NUM_FFR_REGS (1U)
+
+#define SVE_VQ_ARCH_MIN (0U)
+#define SVE_VQ_ARCH_MAX ((1U << ZCR_EL2_SVE_VL_WIDTH) - 1U)
+
+/* convert SVE VL in bytes to VQ */
+#define SVE_VL_TO_VQ(vl_bytes) (((vl_bytes) >> 4U) - 1U)
+
+/* convert SVE VQ to bits */
+#define SVE_VQ_TO_BITS(vq) (((vq) + 1U) << 7U)
+
+/* convert SVE VQ to bytes */
+#define SVE_VQ_TO_BYTES(vq) (SVE_VQ_TO_BITS(vq) / 8U)
+
+/* Get a random SVE VQ between 0 and SVE_VQ_ARCH_MAX. */
+#define SVE_GET_RANDOM_VQ (rand() % (SVE_VQ_ARCH_MAX + 1U))
+
+#ifndef __ASSEMBLY__
+
+typedef uint8_t sve_z_regs_t[SVE_NUM_VECTORS * SVE_VECTOR_LEN_BYTES]
+ __aligned(16);
+typedef uint8_t sve_p_regs_t[SVE_NUM_P_REGS * SVE_P_REG_LEN_BYTES]
+ __aligned(16);
+typedef uint8_t sve_ffr_regs_t[SVE_NUM_FFR_REGS * SVE_FFR_REG_LEN_BYTES]
+ __aligned(16);
+
+uint64_t sve_rdvl_1(void);
+void sve_config_vq(uint8_t sve_vq);
+uint32_t sve_probe_vl(uint8_t sve_max_vq);
+uint64_t sve_read_zcr_elx(void);
+void sve_write_zcr_elx(uint64_t rval);
+
+void sve_z_regs_write(const sve_z_regs_t *z_regs);
+void sve_z_regs_write_rand(sve_z_regs_t *z_regs);
+void sve_z_regs_read(sve_z_regs_t *z_regs);
+uint64_t sve_z_regs_compare(const sve_z_regs_t *s1, const sve_z_regs_t *s2);
+
+void sve_p_regs_write(const sve_p_regs_t *p_regs);
+void sve_p_regs_write_rand(sve_p_regs_t *p_regs);
+void sve_p_regs_read(sve_p_regs_t *p_regs);
+uint64_t sve_p_regs_compare(const sve_p_regs_t *s1, const sve_p_regs_t *s2);
+
+void sve_ffr_regs_write(const sve_ffr_regs_t *ffr_regs);
+void sve_ffr_regs_write_rand(sve_ffr_regs_t *ffr_regs);
+void sve_ffr_regs_read(sve_ffr_regs_t *ffr_regs);
+uint64_t sve_ffr_regs_compare(const sve_ffr_regs_t *s1,
+ const sve_ffr_regs_t *s2);
+
+/* Assembly routines */
+bool sve_subtract_arrays_interleaved(int *dst_array, int *src_array1,
+ int *src_array2, int array_size,
+ bool (*world_switch_cb)(void));
+
+void sve_subtract_arrays(int *dst_array, int *src_array1, int *src_array2,
+ int array_size);
+
+#endif /* __ASSEMBLY__ */
+#endif /* SVE_H */
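A sketch combining the VQ macros with the Z-register fill/compare helpers, assuming sve_z_regs_compare() returns 0 when no register differs:

#include <stdbool.h>
#include <lib/extensions/sve.h>

static bool sve_z_regs_survive(bool (*world_switch)(void))
{
	static sve_z_regs_t z_before, z_after;
	uint8_t vq = (uint8_t)SVE_GET_RANDOM_VQ;

	sve_config_vq(vq);	/* program ZCR_ELx.LEN */

	/*
	 * sve_rdvl_1() reports the vector length in bytes; cross-check the VQ.
	 * This assumes the requested VQ is implemented; a real test would use
	 * sve_probe_vl() to find the supported lengths first.
	 */
	if (SVE_VL_TO_VQ(sve_rdvl_1()) != vq) {
		return false;
	}

	sve_z_regs_write_rand(&z_before);
	if (!world_switch()) {
		return false;
	}
	sve_z_regs_read(&z_after);

	return sve_z_regs_compare(&z_before, &z_after) == 0ULL;
}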
diff --git a/include/lib/heap/page_alloc.h b/include/lib/heap/page_alloc.h
new file mode 100644
index 000000000..7580b78e5
--- /dev/null
+++ b/include/lib/heap/page_alloc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef PAGE_ALLOC_H
+#define PAGE_ALLOC_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define HEAP_NULL_PTR 0U
+#define HEAP_INVALID_LEN -1
+#define HEAP_OUT_OF_RANGE -2
+#define HEAP_INIT_FAILED -3
+#define HEAP_INIT_SUCCESS 0
+
+/*
+ * Initialize the heap memory space to be used.
+ * @heap_base: heap base address
+ * @heap_len: heap size available for use
+ */
+int page_pool_init(uint64_t heap_base, uint64_t heap_len);
+
+/*
+ * Return a pointer to the allocated pages.
+ * @bytes_size: size to allocate, in bytes
+ */
+void *page_alloc(u_register_t bytes_size);
+
+/*
+ * Reset heap memory usage cursor to heap base address
+ */
+void page_pool_reset(void);
+void page_free(u_register_t ptr);
+
+#endif /* PAGE_ALLOC_H */
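A smoke-test sketch of the page allocator's intended use; the heap carve-out symbols are placeholders and NULL-on-failure is an assumption based on HEAP_NULL_PTR:

#include <stdbool.h>
#include <lib/heap/page_alloc.h>

/* Hypothetical carve-out reserved for the test heap. */
#define TEST_HEAP_BASE	0x880000000ULL
#define TEST_HEAP_SIZE	(2ULL * 1024ULL * 1024ULL)

static bool heap_smoke_test(void)
{
	void *page;

	if (page_pool_init(TEST_HEAP_BASE, TEST_HEAP_SIZE) != HEAP_INIT_SUCCESS) {
		return false;
	}

	page = page_alloc(4096U);	/* allocation size is given in bytes */
	if (page == NULL) {
		return false;
	}

	page_free((u_register_t)page);
	page_pool_reset();		/* rewind the allocation cursor to the heap base */
	return true;
}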
diff --git a/include/lib/sprt/sprt_client.h b/include/lib/sprt/sprt_client.h
deleted file mode 100644
index 44c3c2560..000000000
--- a/include/lib/sprt/sprt_client.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPRT_CLIENT_H
-#define SPRT_CLIENT_H
-
-#include <stdint.h>
-
-#include "sprt_common.h"
-
-/*
- * Point the SPRT library at a shared buffer between SPM and SP.
- */
-void sprt_initialize_queues(void *buffer_base);
-
-/*
- * Return SPRT version.
- */
-uint32_t sprt_version(void);
-
-/*
- * Called by the main SPRT client execution context when there are no more
- * messages available via sprt_get_next_message(), or if the SPRT client wishes
- * to yield execution to allow other SPs to run.
- */
-void sprt_wait_for_messages(void);
-
-/*
- * Returns the next message to be processed by the SPRT client. There can be
- * multiple queues of messages for a partition, `queue_num` specifies which
- * queue to read from. Each message can be retrieved only once. The message
- * pointer must point to a valid memory owned by the caller. A zero return
- * value indicates there is a message for the SP, -ENOENT means there are no
- * messages.
- */
-int sprt_get_next_message(struct sprt_queue_entry_message *message,
- int queue_num);
-
-/*
- * End processing of the message passing arg0 to arg3 back to the SPCI client.
- */
-void sprt_message_end(struct sprt_queue_entry_message *message,
- u_register_t arg0, u_register_t arg1, u_register_t arg2,
- u_register_t arg3);
-
-#endif /* SPRT_CLIENT_H */
diff --git a/include/lib/sprt/sprt_common.h b/include/lib/sprt/sprt_common.h
deleted file mode 100644
index 787e7b65a..000000000
--- a/include/lib/sprt/sprt_common.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPRT_COMMON_H
-#define SPRT_COMMON_H
-
-#define SPRT_MAX_MSG_ARGS 6
-
-/*
- * Message types supported.
- */
-#define SPRT_MSG_TYPE_SERVICE_HANDLE_OPEN 1
-#define SPRT_MSG_TYPE_SERVICE_HANDLE_CLOSE 2
-/* TODO: Add other types of SPRT messages. */
-#define SPRT_MSG_TYPE_SERVICE_REQUEST 10
-
-/*
- * Struct that defines the layout of the fields corresponding to a request in
- * shared memory.
- */
-struct __attribute__((__packed__)) sprt_queue_entry_message {
- uint32_t type; /* Type of message (result of an SPCI call). */
- uint16_t client_id; /* SPCI client ID */
- uint16_t service_handle;/* SPCI service handle */
- uint32_t session_id; /* Optional SPCI session ID */
- uint32_t token; /* SPCI request token */
- uint64_t args[SPRT_MAX_MSG_ARGS];
-};
-
-#define SPRT_QUEUE_ENTRY_MSG_SIZE (sizeof(struct sprt_queue_entry_message))
-
-#define SPRT_QUEUE_NUM_BLOCKING 0
-#define SPRT_QUEUE_NUM_NON_BLOCKING 1
-
-#endif /* SPRT_COMMON_H */
diff --git a/include/lib/tftf_lib.h b/include/lib/tftf_lib.h
index f9d1a6eda..36e2e0fe5 100644
--- a/include/lib/tftf_lib.h
+++ b/include/lib/tftf_lib.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -38,6 +38,23 @@ typedef enum {
#define TEST_RESULT_IS_VALID(result) \
((result >= TEST_RESULT_MIN) && (result < TEST_RESULT_MAX))
+#define TEST_ASSERT(must_be_true) \
+ do { \
+ if (!(must_be_true)) { \
+ tftf_testcase_printf("Failed at %s:%d\n", __FILE__, __LINE__); \
+ return TEST_RESULT_FAIL;\
+ } \
+ } while (0)
+
+#define TEST_ASSERT_SKIP(must_be_true) \
+ do { \
+ if (!(must_be_true)) { \
+ tftf_testcase_printf("Failed at %s:%d\n", __FILE__, __LINE__); \
+ return TEST_RESULT_SKIPPED;\
+ } \
+ } while (0)
+
+
/*
* PSCI Function Wrappers
*
@@ -47,6 +64,7 @@ int32_t tftf_psci_cpu_on(u_register_t target_cpu,
uintptr_t entry_point_address,
u_register_t context_id);
int32_t tftf_psci_cpu_off(void);
+int32_t tftf_psci_set_suspend_mode(uint32_t mode);
int32_t tftf_psci_affinity_info(u_register_t target_affinity,
uint32_t lowest_affinity_level);
int32_t tftf_psci_node_hw_state(u_register_t target_cpu, uint32_t power_level);
@@ -158,6 +176,21 @@ typedef struct {
*/
smc_ret_values tftf_smc(const smc_args *args);
+/* Assembler routine to trigger an SMC call. */
+smc_ret_values asm_tftf_smc64(uint32_t fid, u_register_t arg1, u_register_t arg2,
+ u_register_t arg3, u_register_t arg4,
+ u_register_t arg5, u_register_t arg6,
+ u_register_t arg7);
+
+/*
+ * Update the SVE hint for the current CPU. Any SMC call made through tftf_smc
+ * will update the SVE hint bit in the SMC Function ID.
+ */
+void tftf_smc_set_sve_hint(bool sve_hint_flag);
+
+/* Return the SVE hint bit value for the current CPU */
+bool tftf_smc_get_sve_hint(void);
+
/*
* Trigger an HVC call.
*/
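A sketch of how the new TEST_ASSERT macro and the SVE-hint setters might appear in a test case; test_result_t, the smc_args field layout and the SMCCC_VERSION function ID follow the usual TFTF/SMCCC conventions but are assumptions here:

#include <tftf_lib.h>

static test_result_t test_smc_with_sve_hint(void)
{
	smc_args args = { .fid = 0x80000000U };	/* SMCCC_VERSION */
	smc_ret_values ret;

	tftf_smc_set_sve_hint(true);	/* hint bit is folded into the FID by tftf_smc() */
	ret = tftf_smc(&args);
	tftf_smc_set_sve_hint(false);

	/* SMCCC_VERSION returns a positive version value on success. */
	TEST_ASSERT((int32_t)ret.ret0 > 0);

	return TEST_RESULT_SUCCESS;
}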
diff --git a/include/lib/transfer_list.h b/include/lib/transfer_list.h
new file mode 100644
index 000000000..8bf16cfca
--- /dev/null
+++ b/include/lib/transfer_list.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2023, Linaro Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __TRANSFER_LIST_H
+#define __TRANSFER_LIST_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <lib/utils_def.h>
+
+#define TRANSFER_LIST_SIGNATURE U(0x4a0fb10b)
+#define TRANSFER_LIST_VERSION U(0x0001)
+
+// Init value of maximum alignment required by any TE data in the TL
+// specified as a power of two
+#define TRANSFER_LIST_INIT_MAX_ALIGN U(3)
+
+// alignment required by TE header start address, in bytes
+#define TRANSFER_LIST_GRANULE U(8)
+
+// version of the register convention used.
+// Set to 1 for both AArch64 and AArch32 according to fw handoff spec v0.9
+#define REGISTER_CONVENTION_VERSION_MASK (1 << 24)
+
+#ifndef __ASSEMBLER__
+
+enum transfer_list_tag_id {
+ TL_TAG_EMPTY = 0,
+ TL_TAG_FDT = 1,
+ TL_TAG_HOB_BLOCK = 2,
+ TL_TAG_HOB_LIST = 3,
+ TL_TAG_ACPI_TABLE_AGGREGATE = 4,
+};
+
+enum transfer_list_ops {
+ TL_OPS_NON, // invalid for any operation
+ TL_OPS_ALL, // valid for all operations
+};
+
+struct transfer_list_header {
+ uint32_t signature;
+ uint8_t checksum;
+ uint8_t version;
+ uint8_t hdr_size;
+ uint8_t alignment; // max alignment of TE data
+ uint32_t size; // TL header + all TEs
+ uint32_t max_size;
+ /*
+ * Commented out element used to visualize dynamic part of the
+ * data structure.
+ *
+	 * Note that struct transfer_list_entry is also dynamic in size,
+	 * so the elements can't be indexed directly but instead must be
+	 * traversed in order.
+ *
+ * struct transfer_list_entry entries[];
+ */
+};
+
+struct transfer_list_entry {
+ uint16_t tag_id;
+ uint8_t reserved0; // place holder
+ uint8_t hdr_size;
+ uint32_t data_size;
+ /*
+ * Commented out element used to visualize dynamic part of the
+ * data structure.
+ *
+	 * Note that padding is added at the end of @data to make it reach
+	 * an 8-byte boundary.
+ *
+ * uint8_t data[ROUNDUP(data_size, 8)];
+ */
+};
+
+bool transfer_list_verify_checksum(const struct transfer_list_header *tl);
+
+void *transfer_list_entry_data(struct transfer_list_entry *entry);
+
+struct transfer_list_entry *transfer_list_find(struct transfer_list_header *tl,
+ uint16_t tag_id);
+
+enum transfer_list_ops
+transfer_list_check_header(const struct transfer_list_header *tl);
+
+#endif /*__ASSEMBLER__*/
+#endif /*__TRANSFER_LIST_H*/
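A sketch of consuming a handoff transfer list, for instance to locate the FDT entry, assuming the TL base address has already been recovered per the register convention above:

#include <stddef.h>
#include <lib/transfer_list.h>

static void *find_fdt_in_transfer_list(void *tl_base)
{
	struct transfer_list_header *tl = tl_base;
	struct transfer_list_entry *te;

	if (transfer_list_check_header(tl) == TL_OPS_NON ||
	    !transfer_list_verify_checksum(tl)) {
		return NULL;
	}

	te = transfer_list_find(tl, TL_TAG_FDT);
	if (te == NULL) {
		return NULL;
	}

	return transfer_list_entry_data(te);	/* start of the DTB payload */
}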
diff --git a/include/lib/utils_def.h b/include/lib/utils_def.h
index 78d41311d..0013d197d 100644
--- a/include/lib/utils_def.h
+++ b/include/lib/utils_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -162,4 +162,26 @@
#define COMPILER_BARRIER() __asm__ volatile ("" ::: "memory")
+#define INPLACE(regfield, val) \
+ (((val) + UL(0)) << (regfield##_SHIFT))
+
+#define MASK(regfield) \
+ ((~0ULL >> (64ULL - (regfield##_WIDTH))) << (regfield##_SHIFT))
+
+#define EXTRACT(regfield, reg) \
+ (((reg) & MASK(regfield)) >> (regfield##_SHIFT))
+
+/*
+ * Defines member of structure and reserves space
+ * for the next member with specified offset.
+ */
+#define SET_MEMBER(member, start, end) \
+ union { \
+ member; \
+ unsigned char reserved##end[end - start]; \
+ }
+
+#define CONCAT(x, y) x##y
+#define CONC(x, y) CONCAT(x, y)
+
#endif /* UTILS_DEF_H */
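The new field macros expect companion <NAME>_SHIFT and <NAME>_WIDTH definitions; a short illustration with a made-up 4-bit field at bits [11:8]:

#define MYREG_MODE_SHIFT	8
#define MYREG_MODE_WIDTH	4

/*
 * MASK(MYREG_MODE)       == 0xf00
 * INPLACE(MYREG_MODE, 5) == 0x500
 * EXTRACT(MYREG_MODE, r) pulls bits [11:8] back out of r.
 */
static inline uint64_t myreg_set_mode(uint64_t reg, uint64_t mode)
{
	return (reg & ~MASK(MYREG_MODE)) | INPLACE(MYREG_MODE, mode);
}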
diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h
index 1fd3c83fb..ba0559c63 100644
--- a/include/lib/xlat_tables/xlat_tables_defs.h
+++ b/include/lib/xlat_tables/xlat_tables_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -179,4 +179,24 @@
#define XN_SHIFT 54
#define UXN_SHIFT XN_SHIFT
+/*
+ * Stage 2 translation Lower attributes
+ */
+#define S2TTE_AP_SHIFT 6
+#define S2TTE_AP_RW (3UL << S2TTE_AP_SHIFT)
+
+#define S2TTE_SH_SHIFT 8
+#define S2TTE_SH_MASK (3UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_NS (0UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_OS (2UL << S2TTE_SH_SHIFT)
+#define S2TTE_SH_IS (3UL << S2TTE_SH_SHIFT)
+
+/*
+ * Attributes when FEAT_S2FWB is enabled at EL2 (HCR_EL2.FWB == 1).
+ * For Normal WB cacheability attribute, set bit[4] to 1 and bits[3:2] to 0b10.
+ */
+#define S2TTE_MEMATTR_FWB_NORMAL_WB ((1UL << 4) | (2UL << 2))
+#define S2TTE_ATTR_FWB_WB_RW (S2TTE_MEMATTR_FWB_NORMAL_WB | S2TTE_AP_RW | \
+ S2TTE_SH_IS)
+
#endif /* XLAT_TABLES_DEFS_H */
diff --git a/include/plat/common/common_def.h b/include/plat/common/common_def.h
new file mode 100644
index 000000000..844c0c8bf
--- /dev/null
+++ b/include/plat/common/common_def.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _COMMON_DEF_H_
+#define _COMMON_DEF_H_
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* _COMMON_DEF_H_ */
diff --git a/include/plat/common/plat_topology.h b/include/plat/common/plat_topology.h
index 0ca5effce..fbae878f2 100644
--- a/include/plat/common/plat_topology.h
+++ b/include/plat/common/plat_topology.h
@@ -86,6 +86,8 @@ typedef struct tftf_pwr_domain_node {
unsigned char is_present;
} tftf_pwr_domain_node_t;
+extern tftf_pwr_domain_node_t tftf_pd_nodes[PLATFORM_NUM_AFFS];
+
/*
* Detect and store the platform topology so that test cases can query it later.
*/
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index f3536bab7..c8b785ccb 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -8,6 +8,7 @@
#define __PLATFORM_H__
#include <stdint.h>
+#include <arch_helpers.h>
#include <timer.h>
#include <xlat_tables_v2.h>
@@ -184,4 +185,12 @@ int plat_get_image_source(unsigned int image_id,
void plat_fwu_io_setup(void);
+/**
+ * Return the position (core ID) of the currently executing core.
+ */
+static inline uint32_t get_current_core_id(void)
+{
+ return platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+}
+
#endif /* __PLATFORM_H__ */
diff --git a/include/runtime_services/arm_arch_svc.h b/include/runtime_services/arm_arch_svc.h
index 36b44482a..0d2eb38db 100644
--- a/include/runtime_services/arm_arch_svc.h
+++ b/include/runtime_services/arm_arch_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,5 +12,6 @@
#define SMCCC_ARCH_SOC_ID 0x80000002
#define SMCCC_ARCH_WORKAROUND_1 0x80008000
#define SMCCC_ARCH_WORKAROUND_2 0x80007FFF
+#define SMCCC_ARCH_WORKAROUND_3 0x80003FFF
#endif /* __ARM_ARCH_SVC_H__ */
diff --git a/include/runtime_services/cactus_message_loop.h b/include/runtime_services/cactus_message_loop.h
index d69e77ce9..4d963ac4c 100644
--- a/include/runtime_services/cactus_message_loop.h
+++ b/include/runtime_services/cactus_message_loop.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,8 +12,8 @@
*/
struct cactus_cmd_handler {
const uint64_t id;
- smc_ret_values (*fn)(const smc_ret_values *args,
- struct mailbox_buffers *mb);
+ struct ffa_value (*fn)(const struct ffa_value *args,
+ struct mailbox_buffers *mb);
};
/**
@@ -25,8 +25,8 @@ struct cactus_cmd_handler {
* Define handler's function signature.
*/
#define CACTUS_HANDLER_FN(name) \
- static smc_ret_values CACTUS_HANDLER_FN_NAME(name)( \
- const smc_ret_values *args, struct mailbox_buffers *mb)
+ static struct ffa_value CACTUS_HANDLER_FN_NAME(name)( \
+ const struct ffa_value *args, struct mailbox_buffers *mb)
/**
* Helper to define Cactus command handler, and pair it with a command ID.
@@ -40,5 +40,5 @@ struct cactus_cmd_handler {
}; \
CACTUS_HANDLER_FN(name)
-bool cactus_handle_cmd(smc_ret_values *cmd_args, smc_ret_values *ret,
+bool cactus_handle_cmd(struct ffa_value *cmd_args, struct ffa_value *ret,
struct mailbox_buffers *mb);
diff --git a/include/runtime_services/cactus_test_cmds.h b/include/runtime_services/cactus_test_cmds.h
index 483a7f493..3938c2fce 100644
--- a/include/runtime_services/cactus_test_cmds.h
+++ b/include/runtime_services/cactus_test_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,20 +24,24 @@
#define CACTUS_ERROR_FFA_CALL U(3)
#define CACTUS_ERROR_UNHANDLED U(4)
+#define ECHO_VAL1 U(0xa0a0a0a0)
+#define ECHO_VAL2 U(0xb0b0b0b0)
+#define ECHO_VAL3 U(0xc0c0c0c0)
+
/**
- * Get command from struct smc_ret_values.
+ * Get command from struct ffa_value.
*/
-static inline uint64_t cactus_get_cmd(smc_ret_values ret)
+static inline uint64_t cactus_get_cmd(struct ffa_value ret)
{
- return (uint64_t)ret.ret3;
+ return (uint64_t)ret.arg3;
}
/**
* Template for commands to be sent to CACTUS partitions over direct
* messages interfaces.
*/
-static inline smc_ret_values cactus_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint64_t cmd, uint64_t val0,
+static inline struct ffa_value cactus_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t cmd, uint64_t val0,
uint64_t val1, uint64_t val2, uint64_t val3)
{
return ffa_msg_send_direct_req64(source, dest, cmd, val0, val1, val2,
@@ -49,8 +53,8 @@ static inline smc_ret_values cactus_send_cmd(
* 'cactus_send_response' is the template for custom responses, in case there is
* a need to propagate more than one value in the response of a command.
*/
-static inline smc_ret_values cactus_send_response(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t resp, uint32_t val0,
+static inline struct ffa_value cactus_send_response(
+ ffa_id_t source, ffa_id_t dest, uint32_t resp, uint64_t val0,
uint64_t val1, uint64_t val2, uint64_t val3)
{
return ffa_msg_send_direct_resp64(source, dest, resp, val0, val1,
@@ -60,15 +64,15 @@ static inline smc_ret_values cactus_send_response(
/**
* For responses of one value only.
*/
-static inline smc_ret_values cactus_response(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t response)
+static inline struct ffa_value cactus_response(
+ ffa_id_t source, ffa_id_t dest, uint32_t response)
{
- return ffa_msg_send_direct_resp64(source, dest, response, 0, 0, 0, 0);
+ return cactus_send_response(source, dest, response, 0, 0, 0, 0);
}
-static inline uint32_t cactus_get_response(smc_ret_values ret)
+static inline uint32_t cactus_get_response(struct ffa_value ret)
{
- return (uint32_t)ret.ret3;
+ return (uint32_t)ret.arg3;
}
/**
@@ -77,8 +81,8 @@ static inline uint32_t cactus_get_response(smc_ret_values ret)
* If more arguments are needed, a custom response should be defined for the
* specific test.
*/
-static inline smc_ret_values cactus_success_resp(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint64_t value)
+static inline struct ffa_value cactus_success_resp(
+ ffa_id_t source, ffa_id_t dest, uint64_t value)
{
return cactus_send_response(source, dest, CACTUS_SUCCESS, value,
0, 0, 0);
@@ -89,16 +93,16 @@ static inline smc_ret_values cactus_success_resp(
* the reason, which can be specific to the test, or general ones as defined
* in the error code list.
*/
-static inline smc_ret_values cactus_error_resp(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t error_code)
+static inline struct ffa_value cactus_error_resp(
+ ffa_id_t source, ffa_id_t dest, uint32_t error_code)
{
return cactus_send_response(source, dest, CACTUS_ERROR, error_code,
0, 0, 0);
}
-static inline uint32_t cactus_error_code(smc_ret_values ret)
+static inline uint32_t cactus_error_code(struct ffa_value ret)
{
- return (uint32_t) ret.ret4;
+ return (uint32_t) ret.arg4;
}
/**
@@ -109,16 +113,16 @@ static inline uint32_t cactus_error_code(smc_ret_values ret)
*/
#define CACTUS_ECHO_CMD U(0x6563686f)
-static inline smc_ret_values cactus_echo_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint64_t echo_val)
+static inline struct ffa_value cactus_echo_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t echo_val)
{
return cactus_send_cmd(source, dest, CACTUS_ECHO_CMD, echo_val, 0, 0,
0);
}
-static inline uint64_t cactus_echo_get_val(smc_ret_values ret)
+static inline uint64_t cactus_echo_get_val(struct ffa_value ret)
{
- return (uint64_t)ret.ret4;
+ return (uint64_t)ret.arg4;
}
/**
@@ -130,17 +134,17 @@ static inline uint64_t cactus_echo_get_val(smc_ret_values ret)
*/
#define CACTUS_REQ_ECHO_CMD (CACTUS_ECHO_CMD + 1)
-static inline smc_ret_values cactus_req_echo_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, ffa_vm_id_t echo_dest,
+static inline struct ffa_value cactus_req_echo_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t echo_dest,
uint64_t echo_val)
{
return cactus_send_cmd(source, dest, CACTUS_REQ_ECHO_CMD, echo_val,
echo_dest, 0, 0);
}
-static inline ffa_vm_id_t cactus_req_echo_get_echo_dest(smc_ret_values ret)
+static inline ffa_id_t cactus_req_echo_get_echo_dest(struct ffa_value ret)
{
- return (ffa_vm_id_t)ret.ret5;
+ return (ffa_id_t)ret.arg5;
}
/**
@@ -154,16 +158,16 @@ static inline ffa_vm_id_t cactus_req_echo_get_echo_dest(smc_ret_values ret)
*/
#define CACTUS_DEADLOCK_CMD U(0x64656164)
-static inline smc_ret_values cactus_deadlock_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, ffa_vm_id_t next_dest)
+static inline struct ffa_value cactus_deadlock_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t next_dest)
{
return cactus_send_cmd(source, dest, CACTUS_DEADLOCK_CMD, next_dest, 0,
0, 0);
}
-static inline ffa_vm_id_t cactus_deadlock_get_next_dest(smc_ret_values ret)
+static inline ffa_id_t cactus_deadlock_get_next_dest(struct ffa_value ret)
{
- return (ffa_vm_id_t)ret.ret4;
+ return (ffa_id_t)ret.arg4;
}
/**
@@ -172,18 +176,18 @@ static inline ffa_vm_id_t cactus_deadlock_get_next_dest(smc_ret_values ret)
*/
#define CACTUS_REQ_DEADLOCK_CMD (CACTUS_DEADLOCK_CMD + 1)
-static inline smc_ret_values cactus_req_deadlock_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, ffa_vm_id_t next_dest1,
- ffa_vm_id_t next_dest2)
+static inline struct ffa_value cactus_req_deadlock_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t next_dest1,
+ ffa_id_t next_dest2)
{
return cactus_send_cmd(source, dest, CACTUS_REQ_DEADLOCK_CMD,
next_dest1, next_dest2, 0, 0);
}
/* To get next_dest1 use CACTUS_DEADLOCK_GET_NEXT_DEST */
-static inline ffa_vm_id_t cactus_deadlock_get_next_dest2(smc_ret_values ret)
+static inline ffa_id_t cactus_deadlock_get_next_dest2(struct ffa_value ret)
{
- return (ffa_vm_id_t)ret.ret5;
+ return (ffa_id_t)ret.arg5;
}
/**
@@ -194,17 +198,38 @@ static inline ffa_vm_id_t cactus_deadlock_get_next_dest2(smc_ret_values ret)
*/
#define CACTUS_MEM_SEND_CMD U(0x6d656d)
-static inline smc_ret_values cactus_mem_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t mem_func,
- ffa_memory_handle_t handle)
+static inline struct ffa_value cactus_mem_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t mem_func,
+ ffa_memory_handle_t handle, ffa_memory_region_flags_t retrieve_flags,
+ uint16_t word_to_write, bool expect_exception)
{
+ uint64_t _expect_exception = expect_exception ? (1ULL << 32) : 0;
+ uint64_t packed = (uint64_t)word_to_write | _expect_exception;
+
return cactus_send_cmd(source, dest, CACTUS_MEM_SEND_CMD, mem_func,
- handle, 0, 0);
+ handle, retrieve_flags, packed);
+}
+
+static inline ffa_memory_handle_t cactus_mem_send_get_handle(
+ struct ffa_value ret)
+{
+ return (ffa_memory_handle_t)ret.arg5;
+}
+
+static inline ffa_memory_region_flags_t cactus_mem_send_get_retrv_flags(
+ struct ffa_value ret)
+{
+ return (ffa_memory_region_flags_t)ret.arg6;
+}
+
+static inline uint16_t cactus_mem_send_words_to_write(struct ffa_value ret)
+{
+ return (uint16_t)ret.arg7 & 0xFFFFU;
}
-static inline ffa_memory_handle_t cactus_mem_send_get_handle(smc_ret_values ret)
+static inline bool cactus_mem_send_expect_exception(struct ffa_value ret)
{
- return (ffa_memory_handle_t)ret.ret5;
+ return (bool)(ret.arg7 >> 32);
}
/**
@@ -216,22 +241,27 @@ static inline ffa_memory_handle_t cactus_mem_send_get_handle(smc_ret_values ret)
*/
#define CACTUS_REQ_MEM_SEND_CMD U(0x6d656d6f7279)
-static inline smc_ret_values cactus_req_mem_send_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t mem_func,
- ffa_vm_id_t receiver)
+static inline struct ffa_value cactus_req_mem_send_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t mem_func,
+ ffa_id_t receiver, bool non_secure)
{
return cactus_send_cmd(source, dest, CACTUS_REQ_MEM_SEND_CMD, mem_func,
- receiver, 0, 0);
+ receiver, non_secure, 0);
}
-static inline uint32_t cactus_req_mem_send_get_mem_func(smc_ret_values ret)
+static inline uint32_t cactus_req_mem_send_get_mem_func(struct ffa_value ret)
{
- return (uint32_t)ret.ret4;
+ return (uint32_t)ret.arg4;
}
-static inline ffa_vm_id_t cactus_req_mem_send_get_receiver(smc_ret_values ret)
+static inline ffa_id_t cactus_req_mem_send_get_receiver(struct ffa_value ret)
{
- return (ffa_vm_id_t)ret.ret5;
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline bool cactus_req_mem_send_get_non_secure(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
}
/**
@@ -243,30 +273,95 @@ static inline ffa_vm_id_t cactus_req_mem_send_get_receiver(smc_ret_values ret)
*/
#define CACTUS_REQ_SIMD_FILL_CMD U(0x53494d44)
-static inline smc_ret_values cactus_req_simd_fill_send_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest)
+static inline struct ffa_value cactus_req_simd_fill_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
{
return cactus_send_cmd(source, dest, CACTUS_REQ_SIMD_FILL_CMD, 0, 0, 0,
0);
}
/**
+ * Request to compare the FPU state (SIMD vectors, FPCR, FPSR) content
+ * against the previously saved template values, to check the save/restore
+ * routine across context switches between the secure and normal worlds.
+ */
+#define CACTUS_CMP_SIMD_VALUE_CMD (CACTUS_REQ_SIMD_FILL_CMD + 1)
+
+static inline struct ffa_value cactus_req_simd_compare_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_CMP_SIMD_VALUE_CMD, 0, 0, 0,
+ 0);
+}
+
+/**
* Command to request cactus to sleep for the given time in ms
*
* The command id is the hex representation of string "sleep"
*/
#define CACTUS_SLEEP_CMD U(0x736c656570)
-static inline smc_ret_values cactus_sleep_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t sleep_time)
+static inline struct ffa_value cactus_sleep_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t sleep_time)
{
return cactus_send_cmd(source, dest, CACTUS_SLEEP_CMD, sleep_time, 0, 0,
0);
}
-static inline uint32_t cactus_get_sleep_time(smc_ret_values ret)
+/**
+ * Command to request cactus to forward sleep command for the given time in ms
+ *
+ * The sender of this command expects to receive CACTUS_SUCCESS if the requested
+ * sleep forwarding happened successfully, or CACTUS_ERROR otherwise.
+ * Moreover, the sender can hint to the destination SP that the forwarded sleep
+ * command could be preempted by a non-secure interrupt.
+ */
+#define CACTUS_FWD_SLEEP_CMD (CACTUS_SLEEP_CMD + 1)
+
+static inline struct ffa_value cactus_fwd_sleep_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t fwd_dest,
+ uint32_t sleep_time, bool hint_interrupted)
{
- return (uint32_t)ret.ret4;
+ return cactus_send_cmd(source, dest, CACTUS_FWD_SLEEP_CMD, sleep_time,
+ fwd_dest, hint_interrupted, 0);
+}
+
+static inline uint32_t cactus_get_sleep_time(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+static inline ffa_id_t cactus_get_fwd_sleep_dest(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline bool cactus_get_fwd_sleep_interrupted_hint(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
+}
+
+/**
+ * Command to request cactus to sleep for half of the given time in ms, trigger
+ * the trusted watchdog timer, and then sleep again for the remaining half.
+ *
+ * The sender of this command expects to receive CACTUS_SUCCESS if the requested
+ * interaction happened successfully, or CACTUS_ERROR otherwise.
+ */
+#define CACTUS_SLEEP_TRIGGER_TWDOG_CMD (CACTUS_SLEEP_CMD + 2)
+
+static inline struct ffa_value cactus_sleep_trigger_wdog_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t sleep_time,
+ uint64_t wdog_time)
+{
+ return cactus_send_cmd(source, dest, CACTUS_SLEEP_TRIGGER_TWDOG_CMD, sleep_time,
+ wdog_time, 0, 0);
+}
+
+
+static inline uint32_t cactus_get_wdog_trigger_duration(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg5;
}
/**
@@ -276,27 +371,27 @@ static inline uint32_t cactus_get_sleep_time(smc_ret_values ret)
*/
#define CACTUS_INTERRUPT_CMD U(0x696e7472)
-static inline smc_ret_values cactus_interrupt_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest, uint32_t interrupt_id,
+static inline struct ffa_value cactus_interrupt_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t interrupt_id,
bool enable, uint32_t pin)
{
return cactus_send_cmd(source, dest, CACTUS_INTERRUPT_CMD, interrupt_id,
enable, pin, 0);
}
-static inline uint32_t cactus_get_interrupt_id(smc_ret_values ret)
+static inline uint32_t cactus_get_interrupt_id(struct ffa_value ret)
{
- return (uint32_t)ret.ret4;
+ return (uint32_t)ret.arg4;
}
-static inline bool cactus_get_interrupt_enable(smc_ret_values ret)
+static inline bool cactus_get_interrupt_enable(struct ffa_value ret)
{
- return (bool)ret.ret5;
+ return (bool)ret.arg5;
}
-static inline enum interrupt_pin cactus_get_interrupt_pin(smc_ret_values ret)
+static inline enum interrupt_pin cactus_get_interrupt_pin(struct ffa_value ret)
{
- return (enum interrupt_pin)ret.ret6;
+ return (enum interrupt_pin)ret.arg6;
}
/**
@@ -306,10 +401,257 @@ static inline enum interrupt_pin cactus_get_interrupt_pin(smc_ret_values ret)
*/
#define CACTUS_DMA_SMMUv3_CMD (0x534d4d55)
-static inline smc_ret_values cactus_send_dma_cmd(
- ffa_vm_id_t source, ffa_vm_id_t dest)
+static inline struct ffa_value cactus_send_dma_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t operation,
+ uintptr_t base, size_t range, uint32_t attributes)
+{
+ return cactus_send_cmd(source, dest, CACTUS_DMA_SMMUv3_CMD,
+ (uint64_t)operation, (uint64_t)base,
+ (uint64_t)range, attributes);
+}
+
+/*
+ * Request SP to bind a notification to a FF-A endpoint. In case of error
+ * when using the FFA_NOTIFICATION_BIND interface, include the error code
+ * in the response to the command's request. The receiver and sender arguments
+ * are propagated through the command's arguments, to allow the test of
+ * erroneous uses of the FFA_NOTIFICATION_BIND interface.
+ *
+ * The command id is the hex representation of the string "bind".
+ */
+#define CACTUS_NOTIFICATION_BIND_CMD U(0x62696e64)
+
+static inline struct ffa_value cactus_notification_bind_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, ffa_notification_bitmap_t notifications, uint32_t flags)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_BIND_CMD,
+ receiver, sender, notifications, flags);
+}
+
+/**
+ * Request to SP unbind a notification. In case of error when using the
+ * FFA_NOTIFICATION_UNBIND interface, the test includes the error code in the
+ * response. The receiver and sender arguments are propagated throught the
+ * command's arguments, to allow the test of erroneous uses of the
+ * FFA_NOTIFICATION_BIND interface.
+ *
+ * The command id is the hex representation of the string "unbind".
+ */
+#define CACTUS_NOTIFICATION_UNBIND_CMD U(0x756e62696e64)
+
+static inline struct ffa_value cactus_notification_unbind_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, ffa_notification_bitmap_t notifications)
{
- return cactus_send_cmd(source, dest, CACTUS_DMA_SMMUv3_CMD, 0, 0, 0,
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_UNBIND_CMD,
+ receiver, sender, notifications, 0);
+}
+
+static inline ffa_id_t cactus_notification_get_receiver(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg4;
+}
+
+static inline ffa_id_t cactus_notification_get_sender(struct ffa_value ret)
+{
+ return (ffa_id_t)ret.arg5;
+}
+
+static inline ffa_notification_bitmap_t cactus_notification_get_notifications(
+ struct ffa_value ret)
+{
+ return (uint64_t)ret.arg6;
+}
+
+/**
+ * Request SP to get notifications. The arguments to use in ffa_notification_get
+ * are propagated on the command to test erroneous uses of the interface.
+ * In a successful call to the interface, the SP's response payload should
+ * include all bitmaps returned by the SPMC.
+ *
+ * The command id is the hex representation of the string "getnot".
+ */
+#define CACTUS_NOTIFICATION_GET_CMD U(0x6765746e6f74)
+
+static inline struct ffa_value cactus_notification_get_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ uint32_t vcpu_id, uint32_t flags, bool check_npi_handled)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATION_GET_CMD,
+ receiver, vcpu_id, check_npi_handled, flags);
+}
+
+static inline uint32_t cactus_notification_get_vcpu(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg5;
+}
+
+static inline uint32_t cactus_notification_get_flags(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg7;
+}
+
+static inline struct ffa_value cactus_notifications_get_success_resp(
+ ffa_id_t source, ffa_id_t dest, uint64_t from_sp,
+ uint64_t from_vm)
+{
+ return cactus_send_response(source, dest, CACTUS_SUCCESS, from_sp,
+ from_vm, 0, 0);
+}
+
+static inline uint64_t cactus_notifications_get_from_sp(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg4;
+}
+
+static inline uint64_t cactus_notifications_get_from_vm(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg5;
+}
+
+static inline bool cactus_notifications_check_npi_handled(struct ffa_value ret)
+{
+ return (bool)ret.arg6;
+}
+
+/**
+ * Request SP to set notifications. The arguments to use in ffa_notification_set
+ * are propagated on the command to test erroneous uses of the interface.
+ * In case of error while calling the interface, the response should include the
+ * error code. If a delayed SRI is requested in the flags, cactus should
+ * send a CACTUS_ECHO_CMD to the SP specified as `echo_dest`. This should help
+ * validate that the SRI is only sent when returning execution to the NWd.
+ */
+#define CACTUS_NOTIFICATIONS_SET_CMD U(0x6e6f74736574)
+
+static inline struct ffa_value cactus_notifications_set_send_cmd(
+ ffa_id_t source, ffa_id_t dest, ffa_id_t receiver,
+ ffa_id_t sender, uint32_t flags, ffa_notification_bitmap_t notifications,
+ ffa_id_t echo_dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_NOTIFICATIONS_SET_CMD,
+ (uint32_t)receiver | ((uint32_t)sender << 16),
+ echo_dest,
+ notifications, flags);
+}
+
+static inline ffa_id_t cactus_notifications_set_get_receiver(
+ struct ffa_value ret)
+{
+ return (ffa_id_t)(ret.arg4 & 0xFFFFU);
+}
+
+static inline ffa_id_t cactus_notifications_set_get_sender(struct ffa_value ret)
+{
+ return (ffa_id_t)((ret.arg4 >> 16U) & 0xFFFFU);
+}
+
+/**
+ * Request to start trusted watchdog timer.
+ *
+ * The command id is the hex representation of the string "WDOG".
+ */
+#define CACTUS_TWDOG_START_CMD U(0x57444f47)
+
+static inline struct ffa_value cactus_send_twdog_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t time)
+{
+ return cactus_send_cmd(source, dest, CACTUS_TWDOG_START_CMD, time, 0, 0,
+ 0);
+}
+
+static inline uint32_t cactus_get_wdog_duration(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+/**
+ * Request SP to return the current count of handled requests.
+ *
+ * The command id is the hex representation of the string "reqcount".
+ */
+#define CACTUS_GET_REQ_COUNT_CMD U(0x726571636f756e74)
+
+static inline struct ffa_value cactus_get_req_count_send_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_GET_REQ_COUNT_CMD, 0, 0, 0,
0);
}
+
+static inline uint32_t cactus_get_req_count(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+/**
+ * Request SP to return the last serviced secure virtual interrupt.
+ *
+ * The command id is the hex representation of the string "vINT".
+ */
+#define CACTUS_LAST_INTERRUPT_SERVICED_CMD U(0x76494e54)
+
+static inline struct ffa_value cactus_get_last_interrupt_cmd(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_LAST_INTERRUPT_SERVICED_CMD,
+ 0, 0, 0, 0);
+}
+
+/**
+ * Request SP to resume the task requested by current endpoint after managed
+ * exit.
+ *
+ * The command id is the hex representation of the string "RAME" which denotes
+ * (R)esume (A)fter (M)anaged (E)xit.
+ */
+#define CACTUS_RESUME_AFTER_MANAGED_EXIT U(0x52414d45)
+
+static inline struct ffa_value cactus_resume_after_managed_exit(
+ ffa_id_t source, ffa_id_t dest)
+{
+ return cactus_send_cmd(source, dest, CACTUS_RESUME_AFTER_MANAGED_EXIT,
+ 0, 0, 0, 0);
+}
+
+/**
+ * Request SP to pend an interrupt in the extended SPI range.
+ *
+ * The command id is the hex representation of the string "espi".
+ */
+#define CACTUS_TRIGGER_ESPI_CMD U(0x65737069)
+static inline struct ffa_value cactus_trigger_espi_cmd(
+ ffa_id_t source, ffa_id_t dest, uint32_t espi_id)
+{
+ return cactus_send_cmd(source, dest, CACTUS_TRIGGER_ESPI_CMD,
+ espi_id, 0, 0, 0);
+}
+
+static inline uint32_t cactus_get_espi_id(struct ffa_value ret)
+{
+ return (uint32_t)ret.arg4;
+}
+
+/*
+ * Request SP to mimic handling a RAS error delegated by an EL3 logical secure
+ * partition.
+ *
+ * The command ID is the hex representation of the string 'rase' which
+ * denotes RAS Error.
+ */
+#define CACTUS_RAS_DELEGATE_CMD U(0x72617365)
+
+static inline struct ffa_value cactus_ras_delegate_send_cmd(
+ ffa_id_t source, ffa_id_t dest, uint64_t event_id)
+{
+ return cactus_send_cmd(source, dest, CACTUS_RAS_DELEGATE_CMD, event_id, 0, 0,
+ 0);
+}
+
+static inline uint64_t cactus_ras_get_event_id(struct ffa_value ret)
+{
+ return (uint64_t)ret.arg4;
+}
+
#endif
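A sketch of the request/response pattern as seen from the normal-world test side, using the echo command; a real test would also check the function ID of the returned direct response:

#include <stdbool.h>
#include <cactus_test_cmds.h>

static bool echo_to_cactus(ffa_id_t own_id, ffa_id_t sp_id)
{
	struct ffa_value ret = cactus_echo_send_cmd(own_id, sp_id, ECHO_VAL1);

	/* The SP is expected to answer with CACTUS_SUCCESS and the echoed value. */
	if (cactus_get_response(ret) != CACTUS_SUCCESS) {
		return false;
	}

	return cactus_echo_get_val(ret) == ECHO_VAL1;
}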
diff --git a/include/runtime_services/errata_abi.h b/include/runtime_services/errata_abi.h
new file mode 100644
index 000000000..dc50113ae
--- /dev/null
+++ b/include/runtime_services/errata_abi.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Definitions related to the Errata ABI feature
+ * as per the SMC Calling Convention.
+ */
+
+#ifndef __ERRATA_ABI_H__
+#define __ERRATA_ABI_H__
+
+#ifndef __ASSEMBLY__
+#include <stdbool.h>
+#include <stdint.h>
+#include <tftf_lib.h>
+#include <platform_def.h>
+#endif
+
+/*******************************************************************************
+ * Macro to create the array entry for EM_ABI_functions[]
+ ******************************************************************************/
+#define DEFINE_EM_FUNC(_func_id, _mandatory) \
+ { EM_##_func_id, _mandatory, "SMC_" # _func_id }
+
+/*******************************************************************************
+ * Errata ABI feature supported function ids
+ ******************************************************************************/
+#define EM_VERSION 0x840000F0
+#define EM_FEATURES 0x840000F1
+#define EM_CPU_ERRATUM_FEATURES 0x840000F2
+
+/*
+ * Number of EM ABI calls defined in the specification.
+ */
+#define TOTAL_ABI_CALLS (3U)
+
+#define ERRATA_COUNT (32U)
+
+typedef struct {
+ uint32_t id;
+ bool mandatory;
+ const char *str;
+} em_function_t;
+
+typedef struct em_cpu_errata {
+ int em_errata_id;
+ unsigned int rxpx_low;
+ unsigned int rxpx_high;
+} em_cpu_errata_t;
+
+typedef struct em_cpu {
+	uint16_t cpu_pn;
+	em_cpu_errata_t cpu_errata[ERRATA_COUNT];
+} em_cpu_t;
+
+extern const em_function_t em_functions[TOTAL_ABI_CALLS];
+int32_t tftf_em_abi_version(void);
+bool tftf_em_abi_feature_implemented(uint32_t id);
+smc_ret_values tftf_em_abi_cpu_feature_implemented(uint32_t cpu_erratum, uint32_t forward_flag);
+
+
+#define IN_RANGE(x, y, z) (((x >= y) && (x <= z)) ? true : false)
+
+/*******************************************************************************
+ * Errata ABI Version
+ ******************************************************************************/
+#define EM_MAJOR_VER_SHIFT (16)
+#define EM_MAJOR_VER_MASK (0xFFFF)
+#define EM_MINOR_VER_SHIFT (0)
+#define EM_MINOR_VER_MASK (0xFFFF)
+#define EM_ABI_VERSION(major, minor) (((major & EM_MAJOR_VER_MASK) << EM_MAJOR_VER_SHIFT) \
+ | ((minor & EM_MINOR_VER_MASK) << EM_MINOR_VER_SHIFT))
+/*******************************************************************************
+ * Error codes
+ ******************************************************************************/
+#define EM_HIGHER_EL_MITIGATION (3)
+#define EM_NOT_AFFECTED (2)
+#define EM_AFFECTED (1)
+#define EM_SUCCESS (0)
+#define EM_NOT_SUPPORTED (-1)
+#define EM_INVALID_PARAMETERS (-2)
+#define EM_UNKNOWN_ERRATUM (-3)
+#endif /* __ERRATA_ABI_H__ */
+
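A sketch of querying the Errata ABI from a test; reading the status from ret0 follows the usual smc_ret_values convention but is an assumption here:

#include <stdbool.h>
#include <errata_abi.h>

static bool erratum_query_succeeds(uint32_t erratum_id)
{
	smc_ret_values ret;

	if (tftf_em_abi_version() == EM_NOT_SUPPORTED ||
	    !tftf_em_abi_feature_implemented(EM_CPU_ERRATUM_FEATURES)) {
		return false;
	}

	ret = tftf_em_abi_cpu_feature_implemented(erratum_id, 0U);

	/* Any of these status codes means the query itself was understood. */
	return ((int32_t)ret.ret0 == EM_AFFECTED) ||
	       ((int32_t)ret.ret0 == EM_NOT_AFFECTED) ||
	       ((int32_t)ret.ret0 == EM_HIGHER_EL_MITIGATION);
}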
diff --git a/include/runtime_services/ffa_endpoints.h b/include/runtime_services/ffa_endpoints.h
index 7a6cd4f7b..26297bccd 100644
--- a/include/runtime_services/ffa_endpoints.h
+++ b/include/runtime_services/ffa_endpoints.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,10 +7,21 @@
#ifndef FFA_ENDPOINTS_H
#define FFA_ENDPOINTS_H
+#include <platform_def.h>
+
/* UUID of cactus SPs as defined in the respective manifests. */
-#define PRIMARY_UUID {0xb4b5671e, 0x4a904fe1, 0xb81ffb13, 0xdae1dacb}
-#define SECONDARY_UUID {0xd1582309, 0xf02347b9, 0x827c4464, 0xf5578fc8}
-#define TERTIARY_UUID {0x79b55c73, 0x1d8c44b9, 0x859361e1, 0x770ad8d2}
+#define PRIMARY_UUID {0x1e67b5b4, 0xe14f904a, 0x13fb1fb8, 0xcbdae1da}
+#define SECONDARY_UUID {0x092358d1, 0xb94723f0, 0x64447c82, 0xc88f57f5}
+#define TERTIARY_UUID {0x735cb579, 0xb9448c1d, 0xe1619385, 0xd2d80a77}
+#define IVY_UUID {0xd883baea, 0xaf4eafba, 0xfdf74481, 0xa744e5cb}
+#define EL3_SPMD_LP_UUID {0xe98e43ad, 0xb7db524f, 0x47a3bf57, 0x1588f4e3}
+
+/* vcpu_count of cactus SPs. */
+#define PRIMARY_EXEC_CTX_COUNT PLATFORM_CORE_COUNT
+#define SECONDARY_EXEC_CTX_COUNT PLATFORM_CORE_COUNT
+#define TERTIARY_EXEC_CTX_COUNT (1)
+#define IVY_EXEC_CTX_COUNT (1)
+#define EL3_SPMD_LP_EXEC_CTX_COUNT (1)
/* UUID of OPTEE SP as defined in the respective manifest. */
#define OPTEE_UUID {0x486178e0, 0xe7f811e3, 0xbc5e0002, 0xa5d5c51b}
diff --git a/include/runtime_services/ffa_helpers.h b/include/runtime_services/ffa_helpers.h
index 592327af7..0f198278d 100644
--- a/include/runtime_services/ffa_helpers.h
+++ b/include/runtime_services/ffa_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,7 +14,7 @@
/* This error code must be different to the ones used by FFA */
#define FFA_TFTF_ERROR -42
-typedef unsigned short ffa_vm_id_t;
+typedef unsigned short ffa_id_t;
typedef unsigned short ffa_vm_count_t;
typedef unsigned short ffa_vcpu_count_t;
typedef uint64_t ffa_memory_handle_t;
@@ -22,28 +22,344 @@ typedef uint64_t ffa_memory_handle_t;
typedef uint8_t ffa_memory_receiver_flags_t;
struct ffa_uuid {
- const uint32_t uuid[4];
+ uint32_t uuid[4];
+};
+
+/** Length in bytes of the name in boot information descriptor. */
+#define FFA_BOOT_INFO_NAME_LEN 16
+
+/**
+ * The FF-A boot info descriptor, as defined in table 5.8 of section 5.4.1, of
+ * the FF-A v1.1 EAC0 specification.
+ */
+struct ffa_boot_info_desc {
+ char name[FFA_BOOT_INFO_NAME_LEN];
+ uint8_t type;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t size;
+ uint64_t content;
+};
+
+/** FF-A boot information type mask. */
+#define FFA_BOOT_INFO_TYPE_SHIFT 7
+#define FFA_BOOT_INFO_TYPE_MASK (0x1U << FFA_BOOT_INFO_TYPE_SHIFT)
+#define FFA_BOOT_INFO_TYPE_STD 0U
+#define FFA_BOOT_INFO_TYPE_IMPDEF 1U
+
+/** Standard boot info type IDs. */
+#define FFA_BOOT_INFO_TYPE_ID_MASK 0x7FU
+#define FFA_BOOT_INFO_TYPE_ID_FDT 0U
+#define FFA_BOOT_INFO_TYPE_ID_HOB 1U
+
+/** FF-A Boot Info descriptors flags. */
+#define FFA_BOOT_INFO_FLAG_MBZ_MASK 0xFFF0U
+
+/** Bits [1:0] encode the format of the name field in ffa_boot_info_desc. */
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT 0U
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK \
+ (0x3U << FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT)
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING 0x0U
+#define FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID 0x1U
+
+/** Bits [3:2] encode the format of the content field in ffa_boot_info_desc. */
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT 2
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK \
+ (0x3U << FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT)
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_VALUE 0x1U
+#define FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR 0x0U
+
+static inline uint16_t ffa_boot_info_content_format(
+ struct ffa_boot_info_desc *desc)
+{
+ return (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
+ FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
+}
+
+static inline uint16_t ffa_boot_info_name_format(
+ struct ffa_boot_info_desc *desc)
+{
+ return (desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK) >>
+ FFA_BOOT_INFO_FLAG_NAME_FORMAT_SHIFT;
+}
+
+static inline uint8_t ffa_boot_info_type_id(struct ffa_boot_info_desc *desc)
+{
+ return desc->type & FFA_BOOT_INFO_TYPE_ID_MASK;
+}
+
+static inline uint8_t ffa_boot_info_type(struct ffa_boot_info_desc *desc)
+{
+ return (desc->type & FFA_BOOT_INFO_TYPE_MASK) >>
+ FFA_BOOT_INFO_TYPE_SHIFT;
+}
+
+/** Length in bytes of the signature in the boot descriptor. */
+#define FFA_BOOT_INFO_HEADER_SIGNATURE_LEN 4
+
+/**
+ * The FF-A boot information header, as defined in table 5.9 of section 5.4.2,
+ * of the FF-A v1.1 EAC0 specification.
+ */
+struct ffa_boot_info_header {
+ uint32_t signature;
+ uint32_t version;
+ uint32_t info_blob_size;
+ uint32_t desc_size;
+ uint32_t desc_count;
+ uint32_t desc_offset;
+ uint64_t reserved;
+ struct ffa_boot_info_desc boot_info[];
};
#ifndef __ASSEMBLY__
+#include <cassert.h>
#include <stdint.h>
+/**
+ * FF-A Feature ID, to be used with interface FFA_FEATURES.
+ * As defined in the FF-A v1.1 Beta specification, table 13.10, in section
+ * 13.2.
+ */
+
+/** Query interrupt ID of Notification Pending Interrupt. */
+#define FFA_FEATURE_NPI 0x1U
+
+/** Query interrupt ID of Schedule Receiver Interrupt. */
+#define FFA_FEATURE_SRI 0x2U
+
+/** Query interrupt ID of the Managed Exit Interrupt. */
+#define FFA_FEATURE_MEI 0x3U
+
+/** Partition property: partition supports receipt of direct requests. */
+#define FFA_PARTITION_DIRECT_REQ_RECV (UINT32_C(1) << 0)
+
+/** Partition property: partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_REQ_SEND (UINT32_C(1) << 1)
+
+/** Partition property: partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG (UINT32_C(1) << 2)
+
+/** Partition property: partition can receive notifications. */
+#define FFA_PARTITION_NOTIFICATION (UINT32_C(1) << 3)
+
+/** Partition property: partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC (UINT32_C(1) << 8)
+
+/** Partition info descriptor as defined in FF-A v1.1 EAC0 Table 13.37 */
struct ffa_partition_info {
/** The ID of the VM the information is about */
- ffa_vm_id_t id;
+ ffa_id_t id;
/** The number of execution contexts implemented by the partition */
uint16_t exec_context;
/** The Partition's properties, e.g. supported messaging methods */
uint32_t properties;
+ /** The uuid of the partition */
+ struct ffa_uuid uuid;
};
-static inline uint32_t ffa_func_id(smc_ret_values val) {
- return (uint32_t) val.ret0;
+/**
+ * Bits[31:3] of partition properties must be zero for FF-A v1.0.
+ * This corresponds to table 8.25 "Partition information descriptor"
+ * in DEN0077A FF-A 1.0 REL specification.
+ */
+#define FFA_PARTITION_v1_0_RES_MASK (~(UINT32_C(0x7)))
+
+/**
+ * Partition info descriptor as defined in Table 8.25 of the v1.0
+ * FF-A Specification (DEN0077A).
+ */
+struct ffa_partition_info_v1_0 {
+ /** The ID of the VM the information is about */
+ ffa_id_t id;
+ /** The number of execution contexts implemented by the partition */
+ uint16_t exec_context;
+ /** The Partition's properties, e.g. supported messaging methods */
+ uint32_t properties;
+};
+
+struct ffa_value {
+ u_register_t fid;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+ u_register_t arg8;
+ u_register_t arg9;
+ u_register_t arg10;
+ u_register_t arg11;
+ u_register_t arg12;
+ u_register_t arg13;
+ u_register_t arg14;
+ u_register_t arg15;
+ u_register_t arg16;
+ u_register_t arg17;
+};
+
+/* Function to make an SMC or SVC service call depending on the exception
+ * level of the SP.
+ */
+struct ffa_value ffa_service_call(struct ffa_value *args);
+
+/*
+ * Functions to trigger a service call.
+ *
+ * The arguments to pass through the service call must be stored in the
+ * ffa_value structure. The return values of the service call will be stored
+ * in the same structure (overriding the input arguments).
+ *
+ * Return the first return value. It is equivalent to args.fid but is also
+ * provided as the return value for convenience.
+ */
+u_register_t ffa_svc(struct ffa_value *args);
+u_register_t ffa_smc(struct ffa_value *args);
+
+static inline uint32_t ffa_func_id(struct ffa_value val)
+{
+ return (uint32_t)val.fid;
+}
+
+static inline int32_t ffa_error_code(struct ffa_value val)
+{
+ return (int32_t)val.arg2;
+}
+
+static inline ffa_id_t ffa_endpoint_id(struct ffa_value val) {
+ return (ffa_id_t)val.arg2 & 0xffff;
+}
+
+static inline uint32_t ffa_partition_info_count(struct ffa_value val)
+{
+ return (uint32_t)val.arg2;
+}
+
+static inline uint32_t ffa_partition_info_desc_size(struct ffa_value val)
+{
+ return (uint32_t)val.arg3;
+}
+
+static inline uint32_t ffa_feature_intid(struct ffa_value val)
+{
+ return (uint32_t)val.arg2;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_last_idx(
+ struct ffa_value args)
+{
+ return args.arg2 & 0xFFFF;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_curr_idx(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 16) & 0xFFFF;
}
-static inline int32_t ffa_error_code(smc_ret_values val) {
- return (int32_t) val.ret2;
+static inline uint16_t ffa_partition_info_regs_get_tag(struct ffa_value args)
+{
+ return (args.arg2 >> 32) & 0xFFFF;
+}
+
+static inline uint16_t ffa_partition_info_regs_get_desc_size(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 48);
+}
+
+static inline uint32_t ffa_partition_info_regs_partition_count(
+ struct ffa_value args)
+{
+ return ffa_partition_info_regs_get_last_idx(args) + 1;
+}
+
+static inline uint32_t ffa_partition_info_regs_entry_count(
+ struct ffa_value args, uint16_t start_idx)
+{
+ return (ffa_partition_info_regs_get_curr_idx(args) - start_idx + 1);
+}
+
+static inline uint16_t ffa_partition_info_regs_entry_size(
+ struct ffa_value args)
+{
+ return (args.arg2 >> 48) & 0xFFFFU;
+}
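+
+/*
+ * Sketch of paging through FFA_PARTITION_INFO_GET_REGS results with the
+ * helpers above (illustrative; the uuid value and the loop structure are
+ * assumptions):
+ *
+ *	struct ffa_value ret = ffa_partition_info_get_regs(uuid, 0, 0);
+ *	uint16_t last_idx = ffa_partition_info_regs_get_last_idx(ret);
+ *	uint16_t curr_idx = ffa_partition_info_regs_get_curr_idx(ret);
+ *
+ *	while (curr_idx < last_idx) {
+ *		ret = ffa_partition_info_get_regs(uuid, curr_idx + 1, 0);
+ *		curr_idx = ffa_partition_info_regs_get_curr_idx(ret);
+ *	}
+ */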
+
+typedef uint64_t ffa_notification_bitmap_t;
+
+#define FFA_NOTIFICATION(ID) (UINT64_C(1) << ID)
+
+#define MAX_FFA_NOTIFICATIONS UINT32_C(64)
+
+#define FFA_NOTIFICATIONS_FLAG_PER_VCPU UINT32_C(0x1 << 0)
+
+/** Flag to delay Schedule Receiver Interrupt. */
+#define FFA_NOTIFICATIONS_FLAG_DELAY_SRI UINT32_C(0x1 << 1)
+
+#define FFA_NOTIFICATIONS_FLAGS_VCPU_ID(id) UINT32_C((id & 0xFFFF) << 16)
+
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_SP UINT32_C(0x1 << 0)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_VM UINT32_C(0x1 << 1)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_SPM UINT32_C(0x1 << 2)
+#define FFA_NOTIFICATIONS_FLAG_BITMAP_HYP UINT32_C(0x1 << 3)
+
+/**
+ * The following is an SGI ID that the SPMC configures as non-secure, as
+ * suggested by section 9.4.1 of the FF-A v1.1 specification.
+ */
+#define FFA_SCHEDULE_RECEIVER_INTERRUPT_ID 8
+
+#define FFA_NOTIFICATIONS_BITMAP(lo, hi) \
+ (ffa_notification_bitmap_t)(lo) | \
+ (((ffa_notification_bitmap_t)hi << 32) & 0xFFFFFFFF00000000ULL)
+
+static inline ffa_notification_bitmap_t ffa_notifications_get_from_sp(
+ struct ffa_value val)
+{
+ return FFA_NOTIFICATIONS_BITMAP(val.arg2, val.arg3);
+}
+
+static inline ffa_notification_bitmap_t ffa_notifications_get_from_vm(
+ struct ffa_value val)
+{
+ return FFA_NOTIFICATIONS_BITMAP(val.arg4, val.arg5);
+}
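+
+/*
+ * Illustrative construction of the flags and bitmap used with
+ * FFA_NOTIFICATION_SET (the notification ID and vCPU number are arbitrary
+ * example values):
+ *
+ *	uint32_t flags = FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ *			 FFA_NOTIFICATIONS_FLAGS_VCPU_ID(1U);
+ *	ffa_notification_bitmap_t bitmap = FFA_NOTIFICATION(20);
+ *
+ *	ffa_notification_set(sender, receiver, flags, bitmap);
+ */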
+
+/*
+ * FFA_NOTIFICATION_INFO_GET is an SMC64 interface.
+ * The following macros are defined for the SMC64 implementation.
+ */
+#define FFA_NOTIFICATIONS_INFO_GET_MAX_IDS 20U
+
+#define FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING UINT64_C(0x1)
+
+#define FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT 0x7U
+#define FFA_NOTIFICATIONS_LISTS_COUNT_MASK 0x1FU
+#define FFA_NOTIFICATIONS_LIST_SHIFT(l) (2 * (l - 1) + 12)
+#define FFA_NOTIFICATIONS_LIST_SIZE_MASK 0x3U
+
+static inline uint32_t ffa_notifications_info_get_lists_count(
+ struct ffa_value ret)
+{
+ return (uint32_t)(ret.arg2 >> FFA_NOTIFICATIONS_LISTS_COUNT_SHIFT)
+ & FFA_NOTIFICATIONS_LISTS_COUNT_MASK;
+}
+
+static inline uint32_t ffa_notifications_info_get_list_size(
+ struct ffa_value ret, uint32_t list)
+{
+ return (uint32_t)(ret.arg2 >> FFA_NOTIFICATIONS_LIST_SHIFT(list)) &
+ FFA_NOTIFICATIONS_LIST_SIZE_MASK;
+}
+
+static inline bool ffa_notifications_info_get_more_pending(struct ffa_value ret)
+{
+ return (ret.arg2 & FFA_NOTIFICATIONS_INFO_GET_FLAG_MORE_PENDING) != 0U;
}
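+
+/*
+ * Sketch of decoding an FFA_NOTIFICATION_INFO_GET response with the helpers
+ * above (illustrative; the per-list ID handling is elided):
+ *
+ *	uint32_t lists = ffa_notifications_info_get_lists_count(ret);
+ *
+ *	for (uint32_t i = 1U; i <= lists; i++) {
+ *		uint32_t list_size =
+ *			ffa_notifications_info_get_list_size(ret, i);
+ *		...
+ *	}
+ */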
enum ffa_data_access {
@@ -84,77 +400,49 @@ enum ffa_memory_shareability {
FFA_MEMORY_INNER_SHAREABLE,
};
-typedef uint8_t ffa_memory_access_permissions_t;
+typedef struct {
+ uint8_t data_access : 2;
+ uint8_t instruction_access : 2;
+} ffa_memory_access_permissions_t;
+
+_Static_assert(sizeof(ffa_memory_access_permissions_t) == sizeof(uint8_t),
+ "ffa_memory_access_permissions_t must be 1 byte wide");
/**
- * This corresponds to table "Memory region attributes descriptor" of the FF-A
- * 1.0 specification.
+ * NS bit (bit 6) of the memory region attributes descriptor, as defined in
+ * FF-A v1.1 REL0 Table 10.18. Per section 10.10.4.1, the NS bit is reserved
+ * for FFA_MEM_DONATE/LEND/SHARE and FFA_MEM_RETRIEVE_REQUEST.
*/
-typedef uint8_t ffa_memory_attributes_t;
-
-#define FFA_DATA_ACCESS_OFFSET (0x0U)
-#define FFA_DATA_ACCESS_MASK ((0x3U) << FFA_DATA_ACCESS_OFFSET)
-
-#define FFA_INSTRUCTION_ACCESS_OFFSET (0x2U)
-#define FFA_INSTRUCTION_ACCESS_MASK ((0x3U) << FFA_INSTRUCTION_ACCESS_OFFSET)
-
-#define FFA_MEMORY_TYPE_OFFSET (0x4U)
-#define FFA_MEMORY_TYPE_MASK ((0x3U) << FFA_MEMORY_TYPE_OFFSET)
-
-#define FFA_MEMORY_CACHEABILITY_OFFSET (0x2U)
-#define FFA_MEMORY_CACHEABILITY_MASK ((0x3U) << FFA_MEMORY_CACHEABILITY_OFFSET)
-
-#define FFA_MEMORY_SHAREABILITY_OFFSET (0x0U)
-#define FFA_MEMORY_SHAREABILITY_MASK ((0x3U) << FFA_MEMORY_SHAREABILITY_OFFSET)
-
-#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
- static inline void ffa_set_##name##_attr(container_type *attr, \
- const enum ffa_##name perm) \
- { \
- *attr = (*attr & ~(mask)) | ((perm << offset) & mask); \
- }
-
-#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
- static inline enum ffa_##name ffa_get_##name##_attr( \
- container_type attr) \
- { \
- return (enum ffa_##name)((attr & mask) >> offset); \
- }
+enum ffa_memory_security {
+ FFA_MEMORY_SECURITY_UNSPECIFIED = 0,
+ FFA_MEMORY_SECURITY_SECURE = 0,
+ FFA_MEMORY_SECURITY_NON_SECURE,
+};
-ATTR_FUNCTION_SET(data_access, ffa_memory_access_permissions_t,
- FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
-ATTR_FUNCTION_GET(data_access, ffa_memory_access_permissions_t,
- FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
-
-ATTR_FUNCTION_SET(instruction_access, ffa_memory_access_permissions_t,
- FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
-ATTR_FUNCTION_GET(instruction_access, ffa_memory_access_permissions_t,
- FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
-
-ATTR_FUNCTION_SET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
- FFA_MEMORY_TYPE_MASK)
-ATTR_FUNCTION_GET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
- FFA_MEMORY_TYPE_MASK)
-
-ATTR_FUNCTION_SET(memory_cacheability, ffa_memory_attributes_t,
- FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
-ATTR_FUNCTION_GET(memory_cacheability, ffa_memory_attributes_t,
- FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
-
-ATTR_FUNCTION_SET(memory_shareability, ffa_memory_attributes_t,
- FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
-ATTR_FUNCTION_GET(memory_shareability, ffa_memory_attributes_t,
- FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
-
-#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK \
- ((ffa_memory_handle_t)(UINT64_C(1) << 63))
-#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
- ((ffa_memory_handle_t)(UINT64_C(1) << 63))
+/**
+ * This corresponds to table 10.18 of the FF-A v1.1 EAC0 specification, "Memory
+ * region attributes descriptor".
+ */
+typedef struct {
+ uint16_t shareability : 2;
+ uint16_t cacheability : 2;
+ uint16_t type : 2;
+ uint16_t security : 1;
+} ffa_memory_attributes_t;
+
+_Static_assert(sizeof(ffa_memory_attributes_t) == sizeof(uint16_t),
+ "ffa_memory_attributes_t must be 2 bytes wide");
+
+#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK UINT64_C(1)
+#define FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT 63U
+#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR UINT64_C(1)
+#define FFA_MEMORY_HANDLE_ALLOCATOR_SPMC UINT64_C(0)
#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
/**
* A set of contiguous pages which is part of a memory region. This corresponds
- * to table "Constituent memory region descriptor" of the FFA 1.0 specification.
+ * to table 10.14 of the FF-A v1.1 EAC0 specification, "Constituent memory
+ * region descriptor".
*/
struct ffa_memory_region_constituent {
/**
@@ -169,8 +457,8 @@ struct ffa_memory_region_constituent {
};
/**
- * A set of pages comprising a memory region. This corresponds to table
- * "Composite memory region descriptor" of the FFA 1.0 specification.
+ * A set of pages comprising a memory region. This corresponds to table 10.13 of
+ * the FF-A v1.1 EAC0 specification, "Composite memory region descriptor".
*/
struct ffa_composite_memory_region {
/**
@@ -196,7 +484,7 @@ struct ffa_composite_memory_region {
*/
struct ffa_memory_region_attributes {
/** The ID of the VM to which the memory is being given or shared. */
- ffa_vm_id_t receiver;
+ ffa_id_t receiver;
/**
* The permissions with which the memory region should be mapped in the
* receiver's page table.
@@ -237,7 +525,11 @@ typedef uint32_t ffa_memory_region_flags_t;
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)
/** The maximum number of recipients a memory region may be sent to. */
-#define MAX_MEM_SHARE_RECIPIENTS 1U
+#define MAX_MEM_SHARE_RECIPIENTS 2U
+
+struct ffa_memory_access_impdef {
+ uint64_t val[2];
+};
/**
* This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
@@ -250,24 +542,24 @@ struct ffa_memory_access {
* an `ffa_composite_memory_region` struct.
*/
uint32_t composite_memory_region_offset;
+ /* Space for implementation defined information */
+ struct ffa_memory_access_impdef impdef;
uint64_t reserved_0;
};
/**
* Information about a set of pages which are being shared. This corresponds to
- * table "Lend, donate or share memory transaction descriptor" of the FFA
- * 1.0 specification. Note that it is also used for retrieve requests and
- * responses.
+ * table 10.20 of the FF-A v1.1 EAC0 specification, "Lend, donate or share
+ * memory transaction descriptor". Note that it is also used for retrieve
+ * requests and responses.
*/
struct ffa_memory_region {
/**
* The ID of the VM which originally sent the memory region, i.e. the
* owner.
*/
- ffa_vm_id_t sender;
+ ffa_id_t sender;
ffa_memory_attributes_t attributes;
- /** Reserved field, must be 0. */
- uint8_t reserved_0;
/** Flags to control behaviour of the transaction. */
ffa_memory_region_flags_t flags;
ffa_memory_handle_t handle;
@@ -276,15 +568,22 @@ struct ffa_memory_region {
* memory region.
*/
uint64_t tag;
- /** Reserved field, must be 0. */
- uint32_t reserved_1;
+ /** Size of the memory access descriptor. */
+ uint32_t memory_access_desc_size;
/**
* The number of `ffa_memory_access` entries included in this
* transaction.
*/
uint32_t receiver_count;
/**
- * An array of `attribute_count` endpoint memory access descriptors.
+ * Offset of the 'receivers' field, which relates to the memory access
+ * descriptors.
+ */
+ uint32_t receivers_offset;
+ /** Reserved field (12 bytes) must be 0. */
+ uint32_t reserved[3];
+ /**
+ * An array of `receiver_count` endpoint memory access descriptors.
* Each one specifies a memory region offset, an endpoint and the
* attributes with which this memory region should be mapped in that
* endpoint's page table.
@@ -294,23 +593,72 @@ struct ffa_memory_region {
/**
* Descriptor used for FFA_MEM_RELINQUISH requests. This corresponds to table
- * "Descriptor to relinquish a memory region" of the FFA 1.0 specification.
+ * 16.25 of the FF-A v1.1 EAC0 specification, "Descriptor to relinquish a memory
+ * region".
*/
struct ffa_mem_relinquish {
ffa_memory_handle_t handle;
ffa_memory_region_flags_t flags;
uint32_t endpoint_count;
- ffa_vm_id_t endpoints[];
+ ffa_id_t endpoints[];
};
static inline ffa_memory_handle_t ffa_assemble_handle(uint32_t h1, uint32_t h2)
{
- return (uint64_t)h1 | (uint64_t)h2 << 32;
+	return (ffa_memory_handle_t)h1 |
+	       ((ffa_memory_handle_t)h2 << 32);
+}
+
+static inline ffa_memory_handle_t ffa_mem_success_handle(struct ffa_value r)
+{
+ return ffa_assemble_handle(r.arg2, r.arg3);
+}
+
+static inline ffa_memory_handle_t ffa_frag_handle(struct ffa_value r)
+{
+ return ffa_assemble_handle(r.arg1, r.arg2);
+}
+
+static inline ffa_id_t ffa_frag_sender(struct ffa_value args)
+{
+ return (args.arg4 >> 16) & 0xffff;
}
-static inline ffa_memory_handle_t ffa_mem_success_handle(smc_ret_values r)
+/**
+ * To maintain forward compatibility, we cannot make assumptions about the
+ * size of the endpoint memory access descriptor. This helper therefore
+ * fetches a receiver from the receiver array using the memory access
+ * descriptor size field of the memory region descriptor struct.
+ * Returns NULL if the receiver cannot be returned.
+ */
+static inline struct ffa_memory_access *ffa_memory_region_get_receiver(
+ struct ffa_memory_region *memory_region, uint32_t receiver_index)
{
- return ffa_assemble_handle(r.ret2, r.ret3);
+ uint32_t memory_access_desc_size =
+ memory_region->memory_access_desc_size;
+
+ if (receiver_index >= memory_region->receiver_count) {
+ return NULL;
+ }
+
+ /*
+ * Memory access descriptor size cannot be greater than the size of
+ * the memory access descriptor defined by the current FF-A version.
+ */
+ if (memory_access_desc_size > sizeof(struct ffa_memory_access)) {
+ return NULL;
+ }
+
+	/* Check that the receivers offset cannot be used to cause an overflow. */
+ if (memory_region->receivers_offset !=
+ sizeof(struct ffa_memory_region)) {
+ return NULL;
+ }
+
+ return (struct ffa_memory_access *)((uint8_t *)memory_region +
+ memory_region->receivers_offset +
+ (receiver_index *
+ memory_access_desc_size));
}
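+
+/*
+ * Sketch of iterating over the receivers of a memory region using the helper
+ * above (illustrative only):
+ *
+ *	for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
+ *		struct ffa_memory_access *receiver =
+ *			ffa_memory_region_get_receiver(memory_region, i);
+ *
+ *		if (receiver == NULL) {
+ *			break;
+ *		}
+ *		...
+ *	}
+ */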
/**
@@ -335,81 +683,122 @@ ffa_memory_region_get_composite(struct ffa_memory_region *memory_region,
static inline uint32_t ffa_mem_relinquish_init(
struct ffa_mem_relinquish *relinquish_request,
ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
- ffa_vm_id_t sender)
+ ffa_id_t sender)
{
relinquish_request->handle = handle;
relinquish_request->flags = flags;
relinquish_request->endpoint_count = 1;
relinquish_request->endpoints[0] = sender;
- return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_vm_id_t);
+ return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_id_t);
}
uint32_t ffa_memory_retrieve_request_init(
struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability);
+void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
+ ffa_memory_handle_t handle);
+
uint32_t ffa_memory_region_init(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count,
const struct ffa_memory_region_constituent constituents[],
uint32_t constituent_count, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
- enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
+ ffa_memory_region_flags_t flags, enum ffa_memory_type type,
+ enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability, uint32_t *total_length,
uint32_t *fragment_length);
-static inline ffa_vm_id_t ffa_dir_msg_dest(smc_ret_values val) {
- return (ffa_vm_id_t)val.ret1 & U(0xFFFF);
+uint32_t ffa_memory_fragment_init(
+ struct ffa_memory_region_constituent *fragment,
+ size_t fragment_max_size,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t *fragment_length);
+
+static inline ffa_id_t ffa_dir_msg_dest(struct ffa_value val) {
+ return (ffa_id_t)val.arg1 & U(0xFFFF);
}
-static inline ffa_vm_id_t ffa_dir_msg_source(smc_ret_values val) {
- return (ffa_vm_id_t)(val.ret1 >> 16U);
+static inline ffa_id_t ffa_dir_msg_source(struct ffa_value val) {
+ return (ffa_id_t)(val.arg1 >> 16U);
}
-smc_ret_values ffa_msg_send_direct_req64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4);
-
-smc_ret_values ffa_msg_send_direct_req32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4);
-
-smc_ret_values ffa_msg_send_direct_resp64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4);
-
-smc_ret_values ffa_msg_send_direct_resp32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4);
-
-smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id);
-smc_ret_values ffa_version(uint32_t input_version);
-smc_ret_values ffa_id_get(void);
-smc_ret_values ffa_msg_wait(void);
-smc_ret_values ffa_error(int32_t error_code);
-smc_ret_values ffa_features(uint32_t feature);
-smc_ret_values ffa_partition_info_get(const uint32_t uuid[4]);
-smc_ret_values ffa_rx_release(void);
-smc_ret_values ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);
-smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
+struct ffa_value ffa_msg_send_direct_req64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4);
+
+struct ffa_value ffa_msg_send_direct_req32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4);
+
+struct ffa_value ffa_msg_send_direct_resp64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4);
+
+struct ffa_value ffa_msg_send_direct_resp32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4);
+
+struct ffa_value ffa_run(uint32_t dest_id, uint32_t vcpu_id);
+struct ffa_value ffa_version(uint32_t input_version);
+struct ffa_value ffa_id_get(void);
+struct ffa_value ffa_spm_id_get(void);
+struct ffa_value ffa_msg_wait(void);
+struct ffa_value ffa_error(int32_t error_code);
+struct ffa_value ffa_features(uint32_t feature);
+struct ffa_value ffa_features_with_input_property(uint32_t feature,
+ uint32_t param);
+struct ffa_value ffa_partition_info_get(const struct ffa_uuid uuid);
+struct ffa_value ffa_rx_release(void);
+struct ffa_value ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);
+struct ffa_value ffa_rxtx_unmap(void);
+struct ffa_value ffa_mem_donate(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_lend(uint32_t descriptor_length,
uint32_t fragment_length);
-smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
- uint32_t fragment_length);
-smc_ret_values ffa_mem_share(uint32_t descriptor_length,
- uint32_t fragment_length);
-smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
- uint32_t fragment_length);
-smc_ret_values ffa_mem_relinquish(void);
-smc_ret_values ffa_mem_reclaim(uint64_t handle, uint32_t flags);
+struct ffa_value ffa_mem_share(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_relinquish(void);
+struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags);
+struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+ uint32_t fragment_length);
+struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+ uint32_t fragment_length);
+struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count);
+struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id);
+struct ffa_value ffa_notification_bind(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t notifications);
+struct ffa_value ffa_notification_unbind(ffa_id_t sender, ffa_id_t receiver,
+ ffa_notification_bitmap_t notifications);
+struct ffa_value ffa_notification_set(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap);
+struct ffa_value ffa_notification_get(ffa_id_t receiver, uint32_t vcpu_id,
+ uint32_t flags);
+struct ffa_value ffa_notification_info_get(void);
+
+struct ffa_value ffa_console_log(const char* message, size_t char_count);
+struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
+ const uint16_t start_index,
+ const uint16_t tag);
+
+struct ffa_memory_access ffa_memory_access_init(
+ ffa_id_t receiver_id, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ ffa_memory_receiver_flags_t flags,
+ struct ffa_memory_access_impdef *impdef);
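+
+/*
+ * Illustrative construction of a single receiver entry for the memory region
+ * init helpers above (a sketch: the receiver ID, access enumerators and flag
+ * value are example assumptions):
+ *
+ *	struct ffa_memory_access receiver = ffa_memory_access_init(
+ *		RECEIVER_SP_ID, FFA_DATA_ACCESS_RW,
+ *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0, NULL);
+ */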
#endif /* __ASSEMBLY__ */
diff --git a/include/runtime_services/ffa_svc.h b/include/runtime_services/ffa_svc.h
index c97026583..226bbd11b 100644
--- a/include/runtime_services/ffa_svc.h
+++ b/include/runtime_services/ffa_svc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,17 +12,19 @@
#include <uuid.h>
/* FFA error codes. */
-#define FFA_ERROR_NOT_SUPPORTED -1
+#define FFA_ERROR_NOT_SUPPORTED -1
#define FFA_ERROR_INVALID_PARAMETER -2
#define FFA_ERROR_NO_MEMORY -3
#define FFA_ERROR_BUSY -4
#define FFA_ERROR_INTERRUPTED -5
#define FFA_ERROR_DENIED -6
-#define FFA_ERROR_RETRY -7
+#define FFA_ERROR_RETRY -7
+#define FFA_ERROR_ABORTED -8
+#define FFA_ERROR_NO_DATA -9
/* The macros below are used to identify FFA calls from the SMC function ID */
#define FFA_FNUM_MIN_VALUE U(0x60)
-#define FFA_FNUM_MAX_VALUE U(0x84)
+#define FFA_FNUM_MAX_VALUE U(0x8C)
#define is_ffa_fid(fid) __extension__ ({ \
__typeof__(fid) _fid = (fid); \
((GET_SMC_NUM(_fid) >= FFA_FNUM_MIN_VALUE) && \
@@ -32,7 +34,7 @@
#define FFA_VERSION_MAJOR U(1)
#define FFA_VERSION_MAJOR_SHIFT 16
#define FFA_VERSION_MAJOR_MASK U(0x7FFF)
-#define FFA_VERSION_MINOR U(0)
+#define FFA_VERSION_MINOR U(2)
#define FFA_VERSION_MINOR_SHIFT 0
#define FFA_VERSION_MINOR_MASK U(0xFFFF)
#define FFA_VERSION_BIT31_MASK U(1 << 31)
@@ -60,31 +62,51 @@
((func_num) << FUNCID_NUM_SHIFT))
/* FFA function numbers */
-#define FFA_FNUM_ERROR U(0x60)
-#define FFA_FNUM_SUCCESS U(0x61)
-#define FFA_FNUM_INTERRUPT U(0x62)
-#define FFA_FNUM_VERSION U(0x63)
-#define FFA_FNUM_FEATURES U(0x64)
-#define FFA_FNUM_RX_RELEASE U(0x65)
-#define FFA_FNUM_RXTX_MAP U(0x66)
-#define FFA_FNUM_RXTX_UNMAP U(0x67)
-#define FFA_FNUM_PARTITION_INFO_GET U(0x68)
-#define FFA_FNUM_ID_GET U(0x69)
-#define FFA_FNUM_MSG_POLL U(0x6A)
-#define FFA_FNUM_MSG_WAIT U(0x6B)
-#define FFA_FNUM_MSG_YIELD U(0x6C)
-#define FFA_FNUM_MSG_RUN U(0x6D)
-#define FFA_FNUM_MSG_SEND U(0x6E)
-#define FFA_FNUM_MSG_SEND_DIRECT_REQ U(0x6F)
-#define FFA_FNUM_MSG_SEND_DIRECT_RESP U(0x70)
-#define FFA_FNUM_MEM_DONATE U(0x71)
-#define FFA_FNUM_MEM_LEND U(0x72)
-#define FFA_FNUM_MEM_SHARE U(0x73)
-#define FFA_FNUM_MEM_RETRIEVE_REQ U(0x74)
-#define FFA_FNUM_MEM_RETRIEVE_RESP U(0x75)
-#define FFA_FNUM_MEM_RELINQUISH U(0x76)
-#define FFA_FNUM_MEM_RECLAIM U(0x77)
-#define FFA_FNUM_SECONDARY_EP_REGISTER U(0x84)
+#define FFA_FNUM_ERROR U(0x60)
+#define FFA_FNUM_SUCCESS U(0x61)
+#define FFA_FNUM_INTERRUPT U(0x62)
+#define FFA_FNUM_VERSION U(0x63)
+#define FFA_FNUM_FEATURES U(0x64)
+#define FFA_FNUM_RX_RELEASE U(0x65)
+#define FFA_FNUM_RXTX_MAP U(0x66)
+#define FFA_FNUM_RXTX_UNMAP U(0x67)
+#define FFA_FNUM_PARTITION_INFO_GET U(0x68)
+#define FFA_FNUM_ID_GET U(0x69)
+#define FFA_FNUM_MSG_POLL U(0x6A) /* Legacy FF-A v1.0 */
+#define FFA_FNUM_MSG_WAIT U(0x6B)
+#define FFA_FNUM_MSG_YIELD U(0x6C)
+#define FFA_FNUM_RUN U(0x6D)
+#define FFA_FNUM_MSG_SEND U(0x6E) /* Legacy FF-A v1.0 */
+#define FFA_FNUM_MSG_SEND_DIRECT_REQ U(0x6F)
+#define FFA_FNUM_MSG_SEND_DIRECT_RESP U(0x70)
+#define FFA_FNUM_MEM_DONATE U(0x71)
+#define FFA_FNUM_MEM_LEND U(0x72)
+#define FFA_FNUM_MEM_SHARE U(0x73)
+#define FFA_FNUM_MEM_RETRIEVE_REQ U(0x74)
+#define FFA_FNUM_MEM_RETRIEVE_RESP U(0x75)
+#define FFA_FNUM_MEM_RELINQUISH U(0x76)
+#define FFA_FNUM_MEM_RECLAIM U(0x77)
+#define FFA_FNUM_MEM_FRAG_RX U(0x7A)
+#define FFA_FNUM_MEM_FRAG_TX U(0x7B)
+#define FFA_FNUM_NORMAL_WORLD_RESUME U(0x7C)
+
+/* FF-A v1.1 */
+#define FFA_FNUM_NOTIFICATION_BITMAP_CREATE U(0x7D)
+#define FFA_FNUM_NOTIFICATION_BITMAP_DESTROY U(0x7E)
+#define FFA_FNUM_NOTIFICATION_BIND U(0x7F)
+#define FFA_FNUM_NOTIFICATION_UNBIND U(0x80)
+#define FFA_FNUM_NOTIFICATION_SET U(0x81)
+#define FFA_FNUM_NOTIFICATION_GET U(0x82)
+#define FFA_FNUM_NOTIFICATION_INFO_GET U(0x83)
+#define FFA_FNUM_RX_ACQUIRE U(0x84)
+#define FFA_FNUM_SPM_ID_GET U(0x85)
+#define FFA_FNUM_MSG_SEND2 U(0x86)
+#define FFA_FNUM_SECONDARY_EP_REGISTER U(0x87)
+
+/* FF-A v1.2 */
+#define FFA_FNUM_CONSOLE_LOG U(0x8A)
+#define FFA_FNUM_PARTITION_INFO_GET_REGS U(0x8B)
+#define FFA_FNUM_EL3_INTR_HANDLE U(0x8C)
/* FFA SMC32 FIDs */
#define FFA_ERROR FFA_FID(SMC_32, FFA_FNUM_ERROR)
@@ -100,7 +122,7 @@
#define FFA_MSG_POLL FFA_FID(SMC_32, FFA_FNUM_MSG_POLL)
#define FFA_MSG_WAIT FFA_FID(SMC_32, FFA_FNUM_MSG_WAIT)
#define FFA_MSG_YIELD FFA_FID(SMC_32, FFA_FNUM_MSG_YIELD)
-#define FFA_MSG_RUN FFA_FID(SMC_32, FFA_FNUM_MSG_RUN)
+#define FFA_RUN FFA_FID(SMC_32, FFA_FNUM_RUN)
#define FFA_MSG_SEND FFA_FID(SMC_32, FFA_FNUM_MSG_SEND)
#define FFA_MSG_SEND_DIRECT_REQ_SMC32 \
FFA_FID(SMC_32, FFA_FNUM_MSG_SEND_DIRECT_REQ)
@@ -113,7 +135,23 @@
FFA_FID(SMC_32, FFA_FNUM_MEM_RETRIEVE_REQ)
#define FFA_MEM_RETRIEVE_RESP FFA_FID(SMC_32, FFA_FNUM_MEM_RETRIEVE_RESP)
#define FFA_MEM_RELINQUISH FFA_FID(SMC_32, FFA_FNUM_MEM_RELINQUISH)
-#define FFA_MEM_RECLAIM FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
+#define FFA_MEM_RECLAIM FFA_FID(SMC_32, FFA_FNUM_MEM_RECLAIM)
+#define FFA_MEM_FRAG_RX FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_RX)
+#define FFA_MEM_FRAG_TX FFA_FID(SMC_32, FFA_FNUM_MEM_FRAG_TX)
+#define FFA_NOTIFICATION_BITMAP_CREATE \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BITMAP_CREATE)
+#define FFA_NOTIFICATION_BITMAP_DESTROY \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BITMAP_DESTROY)
+#define FFA_NOTIFICATION_BIND FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_BIND)
+#define FFA_NOTIFICATION_UNBIND FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_UNBIND)
+#define FFA_NOTIFICATION_SET FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_SET)
+#define FFA_NOTIFICATION_GET FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_GET)
+#define FFA_NOTIFICATION_INFO_GET \
+ FFA_FID(SMC_32, FFA_FNUM_NOTIFICATION_INFO_GET)
+#define FFA_SPM_ID_GET FFA_FID(SMC_32, FFA_FNUM_SPM_ID_GET)
+
+/* Implementation defined SMC32 FIDs */
+#define FFA_CONSOLE_LOG_SMC32 FFA_FID(SMC_32, FFA_FNUM_CONSOLE_LOG)
/* FFA SMC64 FIDs */
#define FFA_SUCCESS_SMC64 FFA_FID(SMC_64, FFA_FNUM_SUCCESS)
@@ -129,7 +167,15 @@
FFA_FID(SMC_64, FFA_FNUM_MEM_RETRIEVE_REQ)
#define FFA_SECONDARY_EP_REGISTER_SMC64 \
FFA_FID(SMC_64, FFA_FNUM_SECONDARY_EP_REGISTER)
+#define FFA_NOTIFICATION_INFO_GET_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_NOTIFICATION_INFO_GET)
+
+#define FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT (UINT32_C(1) << 1)
+#define FFA_PARTITION_INFO_GET_REGS_SMC64 \
+ FFA_FID(SMC_64, FFA_FNUM_PARTITION_INFO_GET_REGS)
+/* Implementation defined SMC64 FIDs */
+#define FFA_CONSOLE_LOG_SMC64 FFA_FID(SMC_64, FFA_FNUM_CONSOLE_LOG)
/*
* Reserve a special value for traffic targeted to the Hypervisor or SPM.
*/
diff --git a/include/runtime_services/host_realm_managment/host_realm_helper.h b/include/runtime_services/host_realm_managment/host_realm_helper.h
new file mode 100644
index 000000000..9f6d934de
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_realm_helper.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef HOST_REALM_HELPER_H
+#define HOST_REALM_HELPER_H
+
+#include <stdlib.h>
+#include <host_realm_rmi.h>
+#include <tftf_lib.h>
+
+/*
+ * Creates a realm, initializes the heap and creates RTTs.
+ */
+bool host_prepare_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count);
+
+/*
+ * Creates a realm, initializes the heap, creates RTTs and also
+ * creates RECs.
+ */
+bool host_create_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count);
+
+/*
+ * Creates a realm, initializes the heap, creates RTTs,
+ * creates RECs and activates the realm.
+ */
+bool host_create_activate_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count);
+bool host_create_shared_mem(struct realm *realm_ptr,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size);
+bool host_destroy_realm(struct realm *realm_ptr);
+void host_rec_send_sgi(struct realm *realm_ptr,
+ unsigned int sgi, unsigned int rec_num);
+bool host_enter_realm_execute(struct realm *realm_ptr, uint8_t cmd,
+ int test_exit_reason, unsigned int rec_num);
+test_result_t host_cmp_result(void);
+void realm_print_handler(struct realm *realm_ptr, unsigned int rec_num);
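+
+/*
+ * Typical lifecycle sketch built from the helpers above (illustrative; the
+ * addresses, feature flag, command and exit reason values are assumptions):
+ *
+ *	struct realm realm;
+ *	u_register_t rec_flag[] = {RMI_RUNNABLE};
+ *
+ *	if (!host_create_activate_realm_payload(&realm, payload_adr, pool_adr,
+ *						pool_size, 0UL, rec_flag, 1U)) {
+ *		return TEST_RESULT_FAIL;
+ *	}
+ *	host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ *	host_destroy_realm(&realm);
+ */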
+
+#endif /* HOST_REALM_HELPER_H */
diff --git a/include/runtime_services/host_realm_managment/host_realm_mem_layout.h b/include/runtime_services/host_realm_managment/host_realm_mem_layout.h
new file mode 100644
index 000000000..2c3a280d3
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_realm_mem_layout.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef HOST_REALM_MEM_LAYOUT_H
+#define HOST_REALM_MEM_LAYOUT_H
+
+#include <realm_def.h>
+
+/*
+ * Realm payload Memory Usage Layout in TFTF.bin.
+ * The realm.bin is appended to tftf.bin to create a unified
+ * tftf.bin.
+ * +---------------------------+
+ * | TFTF.bin |
+ * | |
+ * +---------------------------+
+ * | Realm Image |
+ * | (REALM_MAX_LOAD_IMG_SIZE) |
+ * +---------------------------+
+ *
+ * The realm memory pool is a combination of PAGE_POOL and NS_SHARED_MEM
+ * +--------------------------------+ +---------------------------+
+ * | Memory Pool | | Heap Memory |
+ * | (NS_REALM_SHARED_MEM_SIZE * | | (PAGE_POOL_MAX_SIZE) |
+ * | MAX_REALM_COUNT) | | .... |
+ * | + PAGE_POOL_MAX_SIZE * | ==> | (PAGE_POOL_MAX_SIZE) |
+ * | MAX_REALM_COUNT) | | |
+ * | | | |
+ * | | +---------------------------+
+ * | | | Shared Region |
+ * | | | (NS_REALM_SHARED_MEM_SIZE)|
+ * | | | .... |
+ * | | | (NS_REALM_SHARED_MEM_SIZE)|
+ * +--------------------------------+ +---------------------------+*
+ * Refer to tftf.lds for the layout.
+ */
+
+#if !(defined(__LINKER__) || defined(__ASSEMBLY__))
+ /* Base address of each section */
+ IMPORT_SYM(uintptr_t, __REALM_PAYLOAD_START__, REALM_IMAGE_BASE);
+ IMPORT_SYM(uintptr_t, __REALM_POOL_START__, PAGE_POOL_BASE);
+ IMPORT_SYM(uintptr_t, __REALM_POOL_END__, PAGE_POOL_END);
+#define NS_REALM_SHARED_MEM_BASE (PAGE_POOL_BASE + (PAGE_POOL_MAX_SIZE * MAX_REALM_COUNT))
+#endif
+
+#ifdef ENABLE_REALM_PAYLOAD_TESTS
+ /* 1MB for shared buffer between Realm and Host */
+ #define NS_REALM_SHARED_MEM_SIZE U(0x100000)
+ /* 3MB of memory used as a pool for realm's objects creation */
+ #define PAGE_POOL_MAX_SIZE U(0x300000)
+#else
+ #define NS_REALM_SHARED_MEM_SIZE U(0x0)
+ #define PAGE_POOL_MAX_SIZE U(0x0)
+#endif
+
+#endif /* HOST_REALM_MEM_LAYOUT_H */
diff --git a/include/runtime_services/host_realm_managment/host_realm_pmu.h b/include/runtime_services/host_realm_managment/host_realm_pmu.h
new file mode 100644
index 000000000..844bb29fd
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_realm_pmu.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef HOST_REALM_PMU_H
+#define HOST_REALM_PMU_H
+
+#include <arch_helpers.h>
+
+/* PMU physical interrupt */
+#define PMU_PPI 23UL
+
+/* PMU virtual interrupt */
+#define PMU_VIRQ PMU_PPI
+
+/* Clear bits P0-P30, C and F0 */
+#define PMU_CLEAR_ALL 0x1FFFFFFFF
+
+/* Number of event counters implemented */
+#define GET_PMU_CNT \
+ ((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK)
+
+void host_set_pmu_state(void);
+bool host_check_pmu_state(void);
+
+#endif /* HOST_REALM_PMU_H */
diff --git a/include/runtime_services/host_realm_managment/host_realm_rmi.h b/include/runtime_services/host_realm_managment/host_realm_rmi.h
new file mode 100644
index 000000000..55233a15d
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_realm_rmi.h
@@ -0,0 +1,613 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef HOST_REALM_RMI_H
+#define HOST_REALM_RMI_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <realm_def.h>
+#include <smccc.h>
+#include <utils_def.h>
+
+#define RMI_FNUM_MIN_VALUE U(0x150)
+#define RMI_FNUM_MAX_VALUE U(0x18F)
+
+/* Get RMI fastcall std FID from offset */
+#define SMC64_RMI_FID(_offset) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ (SMC_64 << FUNCID_CC_SHIFT) | \
+ (OEN_STD_START << FUNCID_OEN_SHIFT) | \
+ (((RMI_FNUM_MIN_VALUE + (_offset)) & FUNCID_NUM_MASK) \
+ << FUNCID_NUM_SHIFT))
+
+#define RMI_ABI_VERSION_GET_MAJOR(_version)	(((_version) >> 16U) & 0x7FFF)
+#define RMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
+#define RMI_ABI_VERSION_MAJOR U(1)
+#define RMI_ABI_VERSION_MINOR U(0)
+#define RMI_ABI_VERSION_VAL ((RMI_ABI_VERSION_MAJOR << 16U) | \
+ RMI_ABI_VERSION_MINOR)
+
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1U)
+#define ALIGN(x, a) __ALIGN((x), (a))
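+
+/*
+ * For example, ALIGN(0x1001, 0x1000) evaluates to 0x2000 while
+ * ALIGN(0x1000, 0x1000) stays at 0x1000, i.e. x is rounded up to a multiple
+ * of a.
+ */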
+
+/*
+ * SMC_RMM_INIT_COMPLETE is the only function in the RMI that originates from
+ * the Realm world and is handled by the RMMD. The remaining functions are
+ * always invoked by the Normal world, forwarded by the RMMD and handled by
+ * the RMM.
+ */
+/* RMI SMC64 FIDs handled by the RMMD */
+/* no parameters */
+#define RMI_VERSION SMC64_RMI_FID(U(0x0))
+
+/*
+ * arg0 == target granule address
+ */
+#define RMI_GRANULE_DELEGATE SMC64_RMI_FID(U(0x1))
+
+/*
+ * arg0 == target granule address
+ */
+#define RMI_GRANULE_UNDELEGATE SMC64_RMI_FID(U(0x2))
+
+/*
+ * arg0 == RD address
+ * arg1 == data address
+ * arg2 == map address
+ * arg3 == SRC address
+ */
+#define RMI_DATA_CREATE SMC64_RMI_FID(U(0x3))
+
+/*
+ * arg0 == RD address
+ * arg1 == data address
+ * arg2 == map address
+ */
+#define RMI_DATA_CREATE_UNKNOWN SMC64_RMI_FID(U(0x4))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ *
+ * ret1 == Address(PA) of the DATA granule, if ret0 == RMI_SUCCESS.
+ * Otherwise, undefined.
+ * ret2 == Top of the non-live address region. Only valid
+ * if ret0 == RMI_SUCCESS or ret0 == (RMI_ERROR_RTT_WALK, x)
+ */
+#define RMI_DATA_DESTROY SMC64_RMI_FID(U(0x5))
+
+/*
+ * arg0 == RD address
+ */
+#define RMI_REALM_ACTIVATE SMC64_RMI_FID(U(0x7))
+
+/*
+ * arg0 == RD address
+ * arg1 == struct rmi_realm_params address
+ */
+#define RMI_REALM_CREATE SMC64_RMI_FID(U(0x8))
+
+/*
+ * arg0 == RD address
+ */
+#define RMI_REALM_DESTROY SMC64_RMI_FID(U(0x9))
+
+/*
+ * arg0 == RD address
+ * arg1 == REC address
+ * arg2 == struct rmm_rec address
+ */
+#define RMI_REC_CREATE SMC64_RMI_FID(U(0xA))
+
+/*
+ * arg0 == REC address
+ */
+#define RMI_REC_DESTROY SMC64_RMI_FID(U(0xB))
+
+/*
+ * arg0 == rec address
+ * arg1 == struct rec_run address
+ */
+#define RMI_REC_ENTER SMC64_RMI_FID(U(0xC))
+
+/*
+ * arg0 == RD address
+ * arg1 == RTT address
+ * arg2 == map address
+ * arg3 == level
+ */
+#define RMI_RTT_CREATE SMC64_RMI_FID(U(0xD))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ *
+ * ret1 == Address (PA) of the RTT, if ret0 == RMI_SUCCESS
+ * Otherwise, undefined.
+ * ret2 == Top of the non-live address region. Only valid
+ * if ret0 == RMI_SUCCESS or ret0 == (RMI_ERROR_RTT_WALK, x)
+ */
+#define RMI_RTT_DESTROY SMC64_RMI_FID(U(0xE))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ * arg3 == s2tte
+ */
+#define RMI_RTT_MAP_UNPROTECTED SMC64_RMI_FID(U(0xF))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ * ret1 == level
+ * ret2 == s2tte type
+ * ret3 == s2tte
+ * ret4 == ripas
+ */
+#define RMI_RTT_READ_ENTRY SMC64_RMI_FID(U(0x11))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ */
+#define RMI_RTT_UNMAP_UNPROTECTED SMC64_RMI_FID(U(0x12))
+
+/*
+ * arg0 == calling rec address
+ * arg1 == target rec address
+ */
+#define RMI_PSCI_COMPLETE SMC64_RMI_FID(U(0x14))
+
+/*
+ * arg0 == Feature register index
+ */
+#define RMI_FEATURES SMC64_RMI_FID(U(0x15))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ *
+ * ret1 == Address(PA) of the RTT folded, if ret0 == RMI_SUCCESS
+ */
+#define RMI_RTT_FOLD SMC64_RMI_FID(U(0x16))
+
+/*
+ * arg0 == RD address
+ */
+#define RMI_REC_AUX_COUNT SMC64_RMI_FID(U(0x17))
+
+/*
+ * arg0 == RD address
+ * arg1 == map address
+ * arg2 == level
+ */
+#define RMI_RTT_INIT_RIPAS SMC64_RMI_FID(U(0x18))
+
+/*
+ * arg0 == RD address
+ * arg1 == REC address
+ * arg2 == map address
+ * arg3 == level
+ * arg4 == ripas
+ */
+#define RMI_RTT_SET_RIPAS SMC64_RMI_FID(U(0x19))
+
+#define GRANULE_SIZE PAGE_SIZE_4KB
+
+/* Maximum number of auxiliary granules required for a REC */
+#define MAX_REC_AUX_GRANULES 16U
+#define REC_PARAMS_AUX_GRANULES 16U
+#define REC_EXIT_NR_GPRS 31U
+
+/* Size of Realm Personalization Value */
+#define RPV_SIZE 64U
+
+/* RmiDisposeResponse types */
+#define RMI_DISPOSE_ACCEPT 0U
+#define RMI_DISPOSE_REJECT 1U
+
+/* RmiFeature enumerations */
+#define RMI_FEATURE_FALSE 0U
+#define RMI_FEATURE_TRUE 1U
+
+/* RmiRealmFlags format */
+#define RMI_REALM_FLAGS_LPA2 BIT(0)
+#define RMI_REALM_FLAGS_SVE BIT(1)
+#define RMI_REALM_FLAGS_PMU BIT(2)
+
+/* RmiInterfaceVersion type */
+#define RMI_MAJOR_VERSION 0U
+#define RMI_MINOR_VERSION 0U
+
+/* RmiRealmMeasurementAlgorithm types */
+#define RMI_HASH_SHA_256 0U
+#define RMI_HASH_SHA_512 1U
+
+/* RmiRecEmulatedMmio types */
+#define RMI_NOT_EMULATED_MMIO 0U
+#define RMI_EMULATED_MMIO 1U
+
+/*
+ * RmiRecExitReason represents the reason for a REC exit.
+ * This is returned to NS hosts via RMI_REC_ENTER::run_ptr.
+ */
+#define RMI_EXIT_SYNC 0U
+#define RMI_EXIT_IRQ 1U
+#define RMI_EXIT_FIQ 2U
+#define RMI_EXIT_PSCI 3U
+#define RMI_EXIT_RIPAS_CHANGE 4U
+#define RMI_EXIT_HOST_CALL 5U
+#define RMI_EXIT_SERROR 6U
+#define RMI_EXIT_INVALID (RMI_EXIT_SERROR + 1U)
+
+/* RmiRecRunnable types */
+#define RMI_NOT_RUNNABLE 0U
+#define RMI_RUNNABLE 1U
+
+/* RmiRttEntryState: represents the state of an RTTE */
+#define RMI_UNASSIGNED UL(0)
+#define RMI_ASSIGNED UL(1)
+#define RMI_TABLE UL(2)
+
+/* RmmRipas enumeration representing realm IPA state */
+#define RMI_EMPTY UL(0)
+#define RMI_RAM UL(1)
+#define RMI_DESTROYED UL(2)
+
+/* RmiPmuOverflowStatus enumeration representing PMU overflow status */
+#define RMI_PMU_OVERFLOW_NOT_ACTIVE 0U
+#define RMI_PMU_OVERFLOW_ACTIVE 1U
+
+/* RmiFeatureRegister0 format */
+#define RMI_FEATURE_REGISTER_0_S2SZ_SHIFT 0UL
+#define RMI_FEATURE_REGISTER_0_S2SZ_WIDTH 8UL
+#define RMI_FEATURE_REGISTER_0_LPA2 BIT(8)
+#define RMI_FEATURE_REGISTER_0_SVE_EN BIT(9)
+#define RMI_FEATURE_REGISTER_0_SVE_VL_SHIFT 10UL
+#define RMI_FEATURE_REGISTER_0_SVE_VL_WIDTH 4UL
+#define RMI_FEATURE_REGISTER_0_NUM_BPS_SHIFT 14UL
+#define RMI_FEATURE_REGISTER_0_NUM_BPS_WIDTH 4UL
+#define RMI_FEATURE_REGISTER_0_NUM_WPS_SHIFT 18UL
+#define RMI_FEATURE_REGISTER_0_NUM_WPS_WIDTH 4UL
+#define RMI_FEATURE_REGISTER_0_PMU_EN BIT(22)
+#define RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS_SHIFT 23UL
+#define RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS_WIDTH 5UL
+#define RMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28)
+#define RMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29)
+
+/*
+ * Format of feature_flag[63:32].
+ * A value of -1 indicates that a field is not set; in that case the
+ * parameter is taken from the corresponding field of feature register 0.
+ */
+#define FEATURE_SVE_VL_SHIFT 32UL
+#define FEATURE_SVE_VL_WIDTH 8UL
+#define FEATURE_NUM_BPS_SHIFT 40UL
+#define FEATURE_NUM_BPS_WIDTH 8UL
+#define FEATURE_NUM_WPS_SHIFT 48UL
+#define FEATURE_NUM_WPS_WIDTH 8UL
+#define FEATURE_PMU_NUM_CTRS_SHIFT 56UL
+#define FEATURE_PMU_NUM_CTRS_WIDTH 8UL
+
+/* RmiStatusCode types */
+/*
+ * Status codes which can be returned from RMM commands.
+ *
+ * For each code, the meaning of return_code_t::index is stated.
+ */
+typedef enum {
+ /*
+ * Command completed successfully.
+ *
+ * index is zero.
+ */
+ RMI_SUCCESS = 0,
+ /*
+ * The value of a command input value caused the command to fail.
+ *
+ * index is zero.
+ */
+ RMI_ERROR_INPUT = 1,
+ /*
+ * An attribute of a Realm does not match the expected value.
+ *
+ * index varies between usages.
+ */
+ RMI_ERROR_REALM = 2,
+ /*
+ * An attribute of a REC does not match the expected value.
+ *
+ * index is zero.
+ */
+ RMI_ERROR_REC = 3,
+ /*
+ * An RTT walk terminated before reaching the target RTT level,
+ * or reached an RTTE with an unexpected value.
+ *
+ * index: RTT level at which the walk terminated
+ */
+ RMI_ERROR_RTT = 4,
+ RMI_ERROR_COUNT
+} status_t;
+
+#define RMI_RETURN_STATUS(ret) ((ret) & 0xFF)
+#define RMI_RETURN_INDEX(ret) (((ret) >> 8U) & 0xFF)
+#define RTT_MAX_LEVEL 3U
+#define ALIGN_DOWN(x, a) ((uint64_t)(x) & ~(((uint64_t)(a)) - 1ULL))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a)-1U)) == 0U)
+#define PAGE_SHIFT FOUR_KB_SHIFT
+#define RTT_LEVEL_SHIFT(l) XLAT_ADDR_SHIFT(l)
+#define RTT_L2_BLOCK_SIZE (1UL << RTT_LEVEL_SHIFT(2U))
+#define RTT_MAP_SIZE(level) (1UL << RTT_LEVEL_SHIFT(level))
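+
+/*
+ * For example, an RMI return value of ((2UL << 8) | RMI_ERROR_RTT) decodes
+ * with the macros above as RMI_RETURN_STATUS() == RMI_ERROR_RTT and
+ * RMI_RETURN_INDEX() == 2, i.e. an RTT walk that terminated at level 2.
+ */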
+
+#define REC_CREATE_NR_GPRS 8U
+#define REC_HVC_NR_GPRS 7U
+#define REC_GIC_NUM_LRS 16U
+
+/*
+ * The Realm attribute parameters are shared by the Host via
+ * RMI_REALM_CREATE::params_ptr. The values can be observed or modified
+ * either by the Host or by the Realm.
+ */
+struct rmi_realm_params {
+ /* Flags */
+ SET_MEMBER(unsigned long flags, 0, 0x8); /* Offset 0 */
+ /* Requested IPA width */
+ SET_MEMBER(unsigned int s2sz, 0x8, 0x10); /* 0x8 */
+ /* Requested SVE vector length */
+ SET_MEMBER(unsigned int sve_vl, 0x10, 0x18); /* 0x10 */
+ /* Requested number of breakpoints */
+ SET_MEMBER(unsigned int num_bps, 0x18, 0x20); /* 0x18 */
+ /* Requested number of watchpoints */
+ SET_MEMBER(unsigned int num_wps, 0x20, 0x28); /* 0x20 */
+ /* Requested number of PMU counters */
+ SET_MEMBER(unsigned int pmu_num_ctrs, 0x28, 0x30); /* 0x28 */
+ /* Measurement algorithm */
+ SET_MEMBER(unsigned char hash_algo, 0x30, 0x400); /* 0x30 */
+ /* Realm Personalization Value */
+ SET_MEMBER(unsigned char rpv[RPV_SIZE], 0x400, 0x800); /* 0x400 */
+ SET_MEMBER(struct {
+ /* Virtual Machine Identifier */
+ unsigned short vmid; /* 0x800 */
+ /* Realm Translation Table base */
+ u_register_t rtt_base; /* 0x808 */
+ /* RTT starting level */
+ long rtt_level_start; /* 0x810 */
+ /* Number of starting level RTTs */
+ unsigned int rtt_num_start; /* 0x818 */
+ }, 0x800, 0x1000);
+};
+
+/*
+ * The REC attribute parameters are shared by the Host via
+ * RMI_REC_CREATE::params_ptr. The values can be observed or modified
+ * either by the Host or by the Realm which owns the REC.
+ */
+struct rmi_rec_params {
+ /* Flags */
+ SET_MEMBER(u_register_t flags, 0, 0x100); /* Offset 0 */
+ /* MPIDR of the REC */
+ SET_MEMBER(u_register_t mpidr, 0x100, 0x200); /* 0x100 */
+ /* Program counter */
+ SET_MEMBER(u_register_t pc, 0x200, 0x300); /* 0x200 */
+ /* General-purpose registers */
+ SET_MEMBER(u_register_t gprs[REC_CREATE_NR_GPRS], 0x300, 0x800); /* 0x300 */
+ SET_MEMBER(struct {
+ /* Number of auxiliary Granules */
+ u_register_t num_aux; /* 0x800 */
+ /* Addresses of auxiliary Granules */
+ u_register_t aux[MAX_REC_AUX_GRANULES]; /* 0x808 */
+ }, 0x800, 0x1000);
+};
+
+/* Whether Host has completed emulation for an Emulatable Data Abort */
+#define REC_ENTRY_FLAG_EMUL_MMIO (UL(1) << 0)
+
+/* Whether to inject a Synchronous External Abort into Realm */
+#define REC_ENTRY_FLAG_INJECT_SEA (UL(1) << 1)
+
+/* Whether to trap WFI/WFE execution by Realm */
+#define REC_ENTRY_FLAG_TRAP_WFI (UL(1) << 2)
+#define REC_ENTRY_FLAG_TRAP_WFE (UL(1) << 3)
+
+/* Host response to RIPAS change request */
+#define REC_ENTRY_FLAG_RIPAS_RESPONSE_REJECT (UL(1) << 4)
+
+/*
+ * Structure contains data passed from the Host to the RMM on REC entry
+ */
+struct rmi_rec_entry {
+ /* Flags */
+ SET_MEMBER(u_register_t flags, 0, 0x200); /* Offset 0 */
+ /* General-purpose registers */
+ SET_MEMBER(u_register_t gprs[REC_EXIT_NR_GPRS], 0x200, 0x300); /* 0x200 */
+ SET_MEMBER(struct {
+ /* GICv3 Hypervisor Control Register */
+ u_register_t gicv3_hcr; /* 0x300 */
+ /* GICv3 List Registers */
+ u_register_t gicv3_lrs[REC_GIC_NUM_LRS]; /* 0x308 */
+ }, 0x300, 0x800);
+};
+
+/*
+ * Structure contains data passed from the RMM to the Host on REC exit
+ */
+struct rmi_rec_exit {
+ /* Exit reason */
+ SET_MEMBER(u_register_t exit_reason, 0, 0x100);/* Offset 0 */
+ SET_MEMBER(struct {
+ /* Exception Syndrome Register */
+ u_register_t esr; /* 0x100 */
+ /* Fault Address Register */
+ u_register_t far; /* 0x108 */
+ /* Hypervisor IPA Fault Address register */
+ u_register_t hpfar; /* 0x110 */
+ }, 0x100, 0x200);
+ /* General-purpose registers */
+ SET_MEMBER(u_register_t gprs[REC_EXIT_NR_GPRS], 0x200, 0x300); /* 0x200 */
+ SET_MEMBER(struct {
+ /* GICv3 Hypervisor Control Register */
+ u_register_t gicv3_hcr; /* 0x300 */
+ /* GICv3 List Registers */
+ u_register_t gicv3_lrs[REC_GIC_NUM_LRS]; /* 0x308 */
+ /* GICv3 Maintenance Interrupt State Register */
+ u_register_t gicv3_misr; /* 0x388 */
+ /* GICv3 Virtual Machine Control Register */
+ u_register_t gicv3_vmcr; /* 0x390 */
+ }, 0x300, 0x400);
+ SET_MEMBER(struct {
+ /* Counter-timer Physical Timer Control Register */
+ u_register_t cntp_ctl; /* 0x400 */
+ /* Counter-timer Physical Timer CompareValue Register */
+ u_register_t cntp_cval; /* 0x408 */
+ /* Counter-timer Virtual Timer Control Register */
+ u_register_t cntv_ctl; /* 0x410 */
+ /* Counter-timer Virtual Timer CompareValue Register */
+ u_register_t cntv_cval; /* 0x418 */
+ }, 0x400, 0x500);
+ SET_MEMBER(struct {
+ /* Base address of pending RIPAS change */
+ u_register_t ripas_base; /* 0x500 */
+ /* Size of pending RIPAS change */
+ u_register_t ripas_size; /* 0x508 */
+ /* RIPAS value of pending RIPAS change */
+ unsigned char ripas_value; /* 0x510 */
+ }, 0x500, 0x600);
+ /* Host call immediate value */
+ SET_MEMBER(unsigned int imm, 0x600, 0x700); /* 0x600 */
+ /* PMU overflow status */
+ SET_MEMBER(unsigned long pmu_ovf_status, 0x700, 0x800); /* 0x700 */
+};
+
+/*
+ * Structure contains shared information between RMM and Host
+ * during REC entry and REC exit.
+ */
+struct rmi_rec_run {
+ /* Entry information */
+ SET_MEMBER(struct rmi_rec_entry entry, 0, 0x800); /* Offset 0 */
+ /* Exit information */
+ SET_MEMBER(struct rmi_rec_exit exit, 0x800, 0x1000); /* 0x800 */
+};
+
+struct rtt_entry {
+ uint64_t walk_level;
+ uint64_t out_addr;
+ u_register_t state;
+ u_register_t ripas;
+};
+
+enum realm_state {
+ REALM_STATE_NULL,
+ REALM_STATE_NEW,
+ REALM_STATE_ACTIVE,
+ REALM_STATE_SYSTEM_OFF
+};
+
+struct realm {
+ u_register_t host_shared_data;
+ unsigned int rec_count;
+ u_register_t par_base;
+ u_register_t par_size;
+ u_register_t rd;
+ u_register_t rtt_addr;
+ u_register_t rec[MAX_REC_COUNT];
+ u_register_t run[MAX_REC_COUNT];
+ u_register_t rec_flag[MAX_REC_COUNT];
+ u_register_t mpidr[MAX_REC_COUNT];
+ u_register_t host_mpidr[MAX_REC_COUNT];
+ u_register_t num_aux;
+ u_register_t rmm_feat_reg0;
+ u_register_t ipa_ns_buffer;
+ u_register_t ns_buffer_size;
+ u_register_t aux_pages_all_rec[MAX_REC_COUNT][REC_PARAMS_AUX_GRANULES];
+ uint8_t sve_vl;
+ uint8_t num_bps;
+ uint8_t num_wps;
+ uint8_t pmu_num_ctrs;
+ bool payload_created;
+ bool shared_mem_created;
+ unsigned short vmid;
+ enum realm_state state;
+};
+
+/* RMI/SMC */
+u_register_t host_rmi_version(u_register_t req_ver);
+u_register_t host_rmi_granule_delegate(u_register_t addr);
+u_register_t host_rmi_granule_undelegate(u_register_t addr);
+u_register_t host_rmi_realm_create(u_register_t rd, u_register_t params_ptr);
+u_register_t host_rmi_realm_destroy(u_register_t rd);
+u_register_t host_rmi_features(u_register_t index, u_register_t *features);
+u_register_t host_rmi_data_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t *data,
+ u_register_t *top);
+u_register_t host_rmi_rtt_readentry(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ struct rtt_entry *rtt);
+u_register_t host_rmi_rtt_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *rtt,
+ u_register_t *top);
+u_register_t host_rmi_rtt_init_ripas(u_register_t rd,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top);
+u_register_t host_rmi_create_rtt_levels(struct realm *realm,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t max_level);
+u_register_t host_rmi_rtt_unmap_unprotected(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *top);
+u_register_t host_rmi_rtt_set_ripas(u_register_t rd,
+ u_register_t rec,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top);
+u_register_t host_rmi_psci_complete(u_register_t calling_rec, u_register_t target_rec,
+ unsigned long status);
+void host_rmi_init_cmp_result(void);
+bool host_rmi_get_cmp_result(void);
+
+/* Realm management */
+u_register_t host_realm_create(struct realm *realm);
+u_register_t host_realm_map_payload_image(struct realm *realm,
+ u_register_t realm_payload_adr);
+u_register_t host_realm_map_ns_shared(struct realm *realm,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size);
+u_register_t host_realm_rec_create(struct realm *realm);
+unsigned int host_realm_find_rec_by_mpidr(unsigned int mpidr, struct realm *realm);
+u_register_t host_realm_activate(struct realm *realm);
+u_register_t host_realm_destroy(struct realm *realm);
+u_register_t host_realm_rec_enter(struct realm *realm,
+ u_register_t *exit_reason,
+ unsigned int *host_call_result,
+ unsigned int rec_num);
+u_register_t host_realm_init_ipa_state(struct realm *realm, u_register_t level,
+ u_register_t start, uint64_t end);
+u_register_t host_realm_delegate_map_protected_data(bool unknown,
+ struct realm *realm,
+ u_register_t target_pa,
+ u_register_t map_size,
+ u_register_t src_pa);
+u_register_t host_realm_map_unprotected(struct realm *realm, u_register_t ns_pa,
+ u_register_t map_size);
+
+#endif /* HOST_REALM_RMI_H */
diff --git a/include/runtime_services/host_realm_managment/host_realm_simd.h b/include/runtime_services/host_realm_managment/host_realm_simd.h
new file mode 100644
index 000000000..9680a9614
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_realm_simd.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef HOST_REALM_SIMD_H
+#define HOST_REALM_SIMD_H
+
+#include <stdint.h>
+
+struct sve_cmd_rdvl {
+ uint64_t rdvl;
+};
+
+struct sve_cmd_id_regs {
+ uint64_t id_aa64pfr0_el1;
+ uint64_t id_aa64zfr0_el1;
+};
+
+struct sve_cmd_probe_vl {
+ uint32_t vl_bitmap;
+};
+
+struct sme_cmd_id_regs {
+ uint64_t id_aa64pfr1_el1;
+ uint64_t id_aa64smfr0_el1;
+};
+
+#endif /* HOST_REALM_SIMD_H */
diff --git a/include/runtime_services/host_realm_managment/host_shared_data.h b/include/runtime_services/host_realm_managment/host_shared_data.h
new file mode 100644
index 000000000..7cfffe68b
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/host_shared_data.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef HOST_SHARED_DATA_H
+#define HOST_SHARED_DATA_H
+
+#include <stdint.h>
+
+#include <host_realm_rmi.h>
+#include <spinlock.h>
+
+#define MAX_BUF_SIZE 10240U
+#define MAX_DATA_SIZE 5U
+
+#define REALM_CMD_BUFFER_SIZE 1024U
+
+/*
+ * This structure maps the shared memory to be used between the Host and Realm
+ * payload
+ */
+typedef struct host_shared_data {
+ /* Buffer used from Realm for logging */
+ uint8_t log_buffer[MAX_BUF_SIZE];
+
+ /* Command set from Host and used by Realm */
+ uint8_t realm_cmd;
+
+ /* array of params passed from Host to Realm */
+ u_register_t host_param_val[MAX_DATA_SIZE];
+
+ /* array of output results passed from Realm to Host */
+ u_register_t realm_out_val[MAX_DATA_SIZE];
+
+ /* Buffer to save Realm command results */
+ uint8_t realm_cmd_output_buffer[REALM_CMD_BUFFER_SIZE];
+} host_shared_data_t;
+
+/*
+ * Different commands that the Host can request the Realm to perform.
+ */
+enum realm_cmd {
+ REALM_SLEEP_CMD = 1U,
+ REALM_LOOP_CMD,
+ REALM_MULTIPLE_REC_PSCI_DENIED_CMD,
+ REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ REALM_GET_RSI_VERSION,
+ REALM_INSTR_FETCH_CMD,
+ REALM_DATA_ACCESS_CMD,
+ REALM_PMU_CYCLE,
+ REALM_PMU_COUNTER,
+ REALM_PMU_EVENT,
+ REALM_PMU_PRESERVE,
+ REALM_PMU_INTERRUPT,
+ REALM_REQ_FPU_FILL_CMD,
+ REALM_REQ_FPU_CMP_CMD,
+ REALM_SET_RIPAS_CMD,
+ REALM_REJECT_SET_RIPAS_CMD,
+ REALM_SVE_RDVL,
+ REALM_SVE_ID_REGISTERS,
+ REALM_SVE_PROBE_VL,
+ REALM_SVE_OPS,
+ REALM_SVE_FILL_REGS,
+ REALM_SVE_CMP_REGS,
+ REALM_SVE_UNDEF_ABORT,
+ REALM_PAUTH_SET_CMD,
+ REALM_PAUTH_CHECK_CMD,
+ REALM_PAUTH_FAULT,
+ REALM_DIT_CHECK_CMD,
+ REALM_SME_ID_REGISTERS,
+ REALM_SME_UNDEF_ABORT
+};
+
+/*
+ * Index values for each parameter in the host_param_val array.
+ */
+enum host_param_index {
+ HOST_CMD_INDEX = 0U,
+ HOST_ARG1_INDEX,
+ HOST_ARG2_INDEX
+};
+
+enum host_call_cmd {
+ HOST_CALL_GET_SHARED_BUFF_CMD = 1U,
+ HOST_CALL_EXIT_SUCCESS_CMD,
+ HOST_CALL_EXIT_FAILED_CMD,
+ HOST_CALL_EXIT_PRINT_CMD
+};
+
+/***************************************
+ * APIs to be invoked from Host side *
+ ***************************************/
+
+/*
+ * Return shared buffer pointer mapped as host_shared_data_t structure
+ */
+host_shared_data_t *host_get_shared_structure(struct realm *realm_ptr, unsigned int rec_num);
+
+/*
+ * Set data to be shared from Host to realm
+ */
+void host_shared_data_set_host_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index, u_register_t val);
+
+/*
+ * Get data shared from realm to Host
+ */
+u_register_t host_shared_data_get_realm_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index);
+
+/*
+ * Set command to be sent from Host to realm
+ */
+void host_shared_data_set_realm_cmd(struct realm *realm_ptr, uint8_t cmd,
+ unsigned int rec_num);
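+
+/*
+ * Sketch of a typical host-side sequence using the accessors above
+ * (illustrative; the command, index and value are arbitrary examples and the
+ * realm entry step is elided):
+ *
+ *	host_shared_data_set_realm_cmd(&realm, REALM_SLEEP_CMD, 0U);
+ *	host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 10U);
+ *	... enter the realm and let it run the command ...
+ *	u_register_t out = host_shared_data_get_realm_val(&realm, 0U, 0U);
+ */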
+
+
+/****************************************
+ * APIs to be invoked from Realm side *
+ ****************************************/
+
+/*
+ * Set guest mapped shared buffer pointer
+ */
+void realm_set_shared_structure(host_shared_data_t *ptr);
+
+/*
+ * Get guest mapped shared buffer pointer
+ */
+host_shared_data_t *realm_get_my_shared_structure(void);
+
+/*
+ * Return Host's data at index
+ */
+u_register_t realm_shared_data_get_my_host_val(uint8_t index);
+
+/*
+ * Get command sent from Host to my Rec
+ */
+uint8_t realm_shared_data_get_my_realm_cmd(void);
+
+/*
+ * Set data to be shared from my Rec to Host
+ */
+void realm_shared_data_set_my_realm_val(uint8_t index, u_register_t val);
+
+#endif /* HOST_SHARED_DATA_H */
diff --git a/include/runtime_services/host_realm_managment/realm_def.h b/include/runtime_services/host_realm_managment/realm_def.h
new file mode 100644
index 000000000..2dd94d09c
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/realm_def.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef REALM_DEF_H
+#define REALM_DEF_H
+
+#include <xlat_tables_defs.h>
+
+/* 1MB for Realm payload as a default value */
+#define REALM_MAX_LOAD_IMG_SIZE U(0x100000)
+#define REALM_STACK_SIZE 0x1000U
+#define DATA_PATTERN_1 0x12345678U
+#define DATA_PATTERN_2 0x11223344U
+#define REALM_SUCCESS 0U
+#define REALM_ERROR 1U
+#define MAX_REC_COUNT 8U
+#define MAX_REALM_COUNT U(2)
+
+/* Only support 4KB at the moment */
+
+#if (PAGE_SIZE == PAGE_SIZE_4KB)
+#define PAGE_ALIGNMENT PAGE_SIZE_4KB
+#define TCR_TG0 TCR_TG0_4K
+#else
+#error "Undefined value for PAGE_SIZE"
+#endif
+
+#endif /* REALM_DEF_H */
diff --git a/include/runtime_services/host_realm_managment/rmi_spm_tests.h b/include/runtime_services/host_realm_managment/rmi_spm_tests.h
new file mode 100644
index 000000000..11c8c12f6
--- /dev/null
+++ b/include/runtime_services/host_realm_managment/rmi_spm_tests.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#ifndef REALM_PAYLOAD_TEST_H
+#define REALM_PAYLOAD_TEST_H
+
+#define NUM_GRANULES 5U
+#define NUM_RANDOM_ITERATIONS 7U
+#define B_DELEGATED 0U
+#define B_UNDELEGATED 1U
+#define NUM_CPU_DED_SPM (PLATFORM_CORE_COUNT / 2U)
+
+#endif /* REALM_PAYLOAD_TEST_H */
diff --git a/include/runtime_services/psci.h b/include/runtime_services/psci.h
index f93ee407a..0e56bdcc6 100644
--- a/include/runtime_services/psci.h
+++ b/include/runtime_services/psci.h
@@ -16,7 +16,6 @@
#ifndef __ASSEMBLY__
#include <assert.h>
-#include <platform_def.h>
#include <stdbool.h>
#include <stdint.h>
#endif
@@ -208,6 +207,12 @@ extern const psci_function_t psci_functions[PSCI_NUM_CALLS];
#define PSCI_E_INVALID_ADDRESS -9
/*******************************************************************************
+ * PSCI suspend mode related constants.
+ ******************************************************************************/
+#define PSCI_PLAT_COORD 0x0
+#define PSCI_OS_INIT 0x1
+
+/*******************************************************************************
* PSCI affinity state related constants.
******************************************************************************/
#define PSCI_STATE_ON 0x0
diff --git a/include/runtime_services/secure_el1_payloads/tsp.h b/include/runtime_services/secure_el1_payloads/tsp.h
index 87ee6f74b..19db911ad 100644
--- a/include/runtime_services/secure_el1_payloads/tsp.h
+++ b/include/runtime_services/secure_el1_payloads/tsp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -32,6 +32,7 @@ static const uuid_t tsp_uuid = {
#define TSP_MUL 0x2002
#define TSP_DIV 0x2003
#define TSP_HANDLE_SEL1_INTR_AND_RETURN 0x2004
+#define TSP_CHECK_DIT 0x2005
/*
* Identify a TSP service from function ID filtering the last 16 bits from the
diff --git a/include/runtime_services/smccc.h b/include/runtime_services/smccc.h
index 283b4637f..b898138ac 100644
--- a/include/runtime_services/smccc.h
+++ b/include/runtime_services/smccc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -34,16 +34,19 @@
#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
+#define FUNCID_SVE_HINT_SHIFT 16
#define FUNCID_NUM_SHIFT 0
#define FUNCID_TYPE_MASK 0x1
#define FUNCID_CC_MASK 0x1
#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_SVE_HINT_MASK 0x1
#define FUNCID_NUM_MASK 0xffff
#define FUNCID_TYPE_WIDTH 1
#define FUNCID_CC_WIDTH 1
#define FUNCID_OEN_WIDTH 6
+#define FUNCID_SVE_HINT_WIDTH 1
#define FUNCID_NUM_WIDTH 16
#define SMC_64 1
diff --git a/include/runtime_services/spm_common.h b/include/runtime_services/spm_common.h
index 50159ecb8..ad2ba0887 100644
--- a/include/runtime_services/spm_common.h
+++ b/include/runtime_services/spm_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,12 +7,19 @@
#ifndef SPM_COMMON_H
#define SPM_COMMON_H
-#include <ffa_helpers.h>
+#include <plat/common/platform.h>
+
#include <stdint.h>
#include <string.h>
+#include <ffa_helpers.h>
+
+#include <lib/extensions/sve.h>
+
/* Hypervisor ID at physical FFA instance */
#define HYP_ID (0)
+/* SPMC ID */
+#define SPMC_ID U(0x8000)
/* ID for the first Secure Partition. */
#define SPM_VM_ID_FIRST SP_ID(1)
@@ -20,6 +27,12 @@
/* INTID for the managed exit virtual interrupt. */
#define MANAGED_EXIT_INTERRUPT_ID U(4)
+/* INTID for the notification pending interrupt. */
+#define NOTIFICATION_PENDING_INTERRUPT_INTID 5
+
+/* Interrupt used for testing extended SPI handling. */
+#define IRQ_ESPI_TEST_INTID 5000
+
/** IRQ/FIQ pin used for signaling a virtual interrupt. */
enum interrupt_pin {
INTERRUPT_TYPE_IRQ,
@@ -36,12 +49,17 @@ enum interrupt_pin {
*/
#define SP_ID_MASK U(1 << 15)
#define SP_ID(x) ((x) | SP_ID_MASK)
+#define VM_ID(x) ((x) & ~SP_ID_MASK)
#define IS_SP_ID(x) ((x & SP_ID_MASK) != 0U)
+#define NULL_UUID (const struct ffa_uuid) { .uuid = {0} }
+
struct ffa_features_test {
const char *test_name;
unsigned int feature;
unsigned int expected_ret;
+ unsigned int param;
+ unsigned int version_added;
};
struct mailbox_buffers {
@@ -73,29 +91,13 @@ struct mailbox_buffers {
/**
* Helpers to evaluate returns of FF-A calls.
*/
-bool is_ffa_call_error(smc_ret_values val);
-bool is_ffa_direct_response(smc_ret_values ret);
-bool is_expected_ffa_return(smc_ret_values ret, uint32_t func_id);
-
-/*
- * Vector length:
- * SIMD: 128 bits = 16 bytes
- */
-#define SIMD_VECTOR_LEN_BYTES 16
-#define SIMD_NUM_VECTORS 32
-typedef uint8_t simd_vector_t[SIMD_VECTOR_LEN_BYTES];
-
-/*
- * Fills SIMD registers with the content of the container v.
- * Number of vectors is assumed to be SIMD_NUM_VECTORS.
- */
-void fill_simd_vector_regs(const simd_vector_t v[SIMD_NUM_VECTORS]);
-
-/*
- * Reads contents of SIMD registers into the provided container v.
- * Number of vectors is assumed to be SIMD_NUM_VECTORS.
- */
-void read_simd_vector_regs(simd_vector_t v[SIMD_NUM_VECTORS]);
+bool is_ffa_call_error(struct ffa_value val);
+bool is_expected_ffa_error(struct ffa_value ret, int32_t error_code);
+bool is_ffa_direct_response(struct ffa_value ret);
+bool is_expected_ffa_return(struct ffa_value ret, uint32_t func_id);
+bool is_expected_cactus_response(struct ffa_value ret, uint32_t expected_resp,
+ uint32_t arg);
+void dump_ffa_value(struct ffa_value ret);
bool check_spmc_execution_level(void);
@@ -107,24 +109,45 @@ unsigned int get_ffa_feature_test_target(const struct ffa_features_test **test_t
*/
bool memory_retrieve(struct mailbox_buffers *mb,
struct ffa_memory_region **retrieved, uint64_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
- uint32_t mem_func);
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, ffa_memory_region_flags_t flags);
+
+bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
+ void *out, uint32_t out_size);
/**
* Helper to conduct a memory relinquish. The caller is usually the receiver,
* after it being done with the memory shared, identified by the 'handle'.
*/
bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
- ffa_vm_id_t id);
+ ffa_id_t id);
ffa_memory_handle_t memory_send(
- struct ffa_memory_region *memory_region, uint32_t mem_func,
- uint32_t fragment_length, uint32_t total_length);
+ void *send_buffer, uint32_t mem_func,
+ const struct ffa_memory_region_constituent *constituents,
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t fragment_length, uint32_t total_length,
+ struct ffa_value *ret);
ffa_memory_handle_t memory_init_and_send(
- struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
- const struct ffa_memory_region_constituent* constituents,
- uint32_t constituents_count, uint32_t mem_func);
+ void *send_buffer, size_t memory_region_max_size, ffa_id_t sender,
+ struct ffa_memory_access receivers[], uint32_t receiver_count,
+ const struct ffa_memory_region_constituent *constituents,
+ uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret);
+
+bool ffa_partition_info_helper(struct mailbox_buffers *mb,
+ const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size);
+bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
+bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
+
+bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size);
+
+struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
+ ffa_id_t receiver_id,
+ uint32_t mem_func);
#endif /* SPM_COMMON_H */
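For illustration, a typical check of an FF-A direct response built on the helpers above
might look like the following sketch (the direct request itself is issued elsewhere; the
expected response and argument values are supplied by the caller):

        static bool check_direct_response(struct ffa_value ret, uint32_t expected_resp,
                                          uint32_t expected_arg)
        {
                /* FFA_ERROR returned: print the full return values for debugging. */
                if (is_ffa_call_error(ret)) {
                        dump_ffa_value(ret);
                        return false;
                }

                /* Must be an FFA_MSG_SEND_DIRECT_RESP. */
                if (!is_ffa_direct_response(ret)) {
                        return false;
                }

                /* Check the cactus command response payload. */
                return is_expected_cactus_response(ret, expected_resp, expected_arg);
        }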
diff --git a/include/runtime_services/spm_test_helpers.h b/include/runtime_services/spm_test_helpers.h
new file mode 100644
index 000000000..fede4ad94
--- /dev/null
+++ b/include/runtime_services/spm_test_helpers.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SPM_TEST_HELPERS_H__
+#define SPM_TEST_HELPERS_H__
+
+#include <events.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <spm_common.h>
+
+#define SKIP_TEST_IF_FFA_VERSION_LESS_THAN(major, minor) \
+ do { \
+ struct ffa_value ret = ffa_version( \
+ MAKE_FFA_VERSION(major, minor)); \
+ uint32_t version = ret.fid; \
+ \
+ if (version == FFA_ERROR_NOT_SUPPORTED) { \
+ tftf_testcase_printf("FFA_VERSION not supported.\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ \
+ if ((version & FFA_VERSION_BIT31_MASK) != 0U) { \
+ tftf_testcase_printf("FFA_VERSION bad response: %x\n", \
+ version); \
+ return TEST_RESULT_FAIL; \
+ } \
+ \
+ if (version < MAKE_FFA_VERSION(major, minor)) { \
+ tftf_testcase_printf("FFA_VERSION returned %u.%u\n" \
+ "The required version is %u.%u\n", \
+ version >> FFA_VERSION_MAJOR_SHIFT, \
+ version & FFA_VERSION_MINOR_MASK, \
+ major, minor); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (0)
+
+#define SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(mb, ffa_uuid) \
+ do { \
+ struct ffa_value sc_ret = ffa_partition_info_get(ffa_uuid); \
+ ffa_rx_release(); \
+ if (ffa_func_id(sc_ret) == FFA_ERROR && \
+ ffa_error_code(sc_ret) == FFA_ERROR_INVALID_PARAMETER) { \
+ tftf_testcase_printf("FFA endpoint not deployed!\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } else if (ffa_func_id(sc_ret) != FFA_SUCCESS_SMC32) { \
+ ERROR("ffa_partition_info_get failed!\n"); \
+ return TEST_RESULT_FAIL; \
+ } \
+ } while (0)
+
+#define GET_TFTF_MAILBOX(mb) \
+ do { \
+ if (!get_tftf_mailbox(&mb)) { \
+ ERROR("Mailbox RXTX buffers not configured!\n"); \
+ return TEST_RESULT_FAIL; \
+ } \
+ } while (false);
+
+#define CHECK_SPMC_TESTING_SETUP(ffa_major, ffa_minor, expected_uuids) \
+ do { \
+ SKIP_TEST_IF_AARCH32(); \
+ const size_t expected_uuids_size = \
+ sizeof(expected_uuids) / sizeof(struct ffa_uuid); \
+ test_result_t ret = check_spmc_testing_set_up( \
+ ffa_major, ffa_minor, expected_uuids, \
+ expected_uuids_size); \
+ if (ret != TEST_RESULT_SUCCESS) { \
+ return ret; \
+ } \
+ } while (false);
+
+/*
+ * Helper function to reset TFTF global mailbox for SPM related tests.
+ * It calls the FFA_RXTX_UNMAP interface, for the SPMC to drop the current
+ * address.
+ */
+bool reset_tftf_mailbox(void);
+
+/*
+ * Helper function to get TFTF global mailbox for SPM related tests.
+ * Allocates RX/TX buffer pair and calls FFA_RXTX_MAP interface, for the SPMC
+ * to map them into its own S1 translation.
+ * If the buffers have already been mapped, the function simply returns their
+ * addresses in 'mb'.
+ */
+bool get_tftf_mailbox(struct mailbox_buffers *mb);
+
+test_result_t check_spmc_testing_set_up(uint32_t ffa_version_major,
+ uint32_t ffa_version_minor, const struct ffa_uuid *ffa_uuids,
+ size_t ffa_uuids_size);
+
+/**
+ * Turn on all CPUs and execute a test on each of them.
+ * - 'cpu_on_handler' should contain the code of the test to run.
+ * - 'cpu_booted' is used to record which cores have executed the test. The
+ * handler started via 'cpu_on_handler' should signal this event at the end
+ * of its processing so that it complies with this function's implementation.
+ */
+test_result_t spm_run_multi_core_test(uintptr_t cpu_on_handler,
+ event_t *cpu_booted);
+
+/**
+ * Call FFA_RUN in the designated SP to make it reach the message loop.
+ * Used within CPU_ON handlers, to bring up the SP in the current core.
+ */
+bool spm_core_sp_init(ffa_id_t sp_id);
+
+/**
+ * Initializes the Mailbox for other SPM related tests that need to use
+ * RXTX buffers.
+ */
+bool mailbox_init(struct mailbox_buffers mb);
+
+#endif /* SPM_TEST_HELPERS_H__ */
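A typical test prologue built on these macros might look like the sketch below (the FF-A
version numbers and the UUID list are placeholders for whatever the test actually
requires):

        static const struct ffa_uuid expected_sp_uuids[] = {
                { .uuid = {0} } /* placeholder: UUIDs of the SPs the test expects */
        };

        test_result_t test_example(void)
        {
                struct mailbox_buffers mb;

                /* Skip unless the SPMC and the expected SPs are deployed. */
                CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);

                /* Fetch (and, if needed, map) the TFTF RX/TX buffers. */
                GET_TFTF_MAILBOX(mb);

                /* ... exercise the FF-A ABIs under test using 'mb' ... */

                return TEST_RESULT_SUCCESS;
        }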
diff --git a/include/runtime_services/sprt_svc.h b/include/runtime_services/sprt_svc.h
deleted file mode 100644
index e662c28e8..000000000
--- a/include/runtime_services/sprt_svc.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPRT_SVC_H
-#define SPRT_SVC_H
-
-#include <smccc.h>
-#include <utils_def.h>
-
-/* SPRT_VERSION helpers */
-
-#define SPRT_VERSION_MAJOR U(0)
-#define SPRT_VERSION_MAJOR_SHIFT 16
-#define SPRT_VERSION_MAJOR_MASK U(0x7FFF)
-#define SPRT_VERSION_MINOR U(1)
-#define SPRT_VERSION_MINOR_SHIFT 0
-#define SPRT_VERSION_MINOR_MASK U(0xFFFF)
-#define SPRT_VERSION_FORM(major, minor) ((((major) & SPRT_VERSION_MAJOR_MASK) \
- << SPRT_VERSION_MAJOR_SHIFT) | \
- ((minor) & SPRT_VERSION_MINOR_MASK))
-#define SPRT_VERSION_COMPILED SPRT_VERSION_FORM(SPRT_VERSION_MAJOR, \
- SPRT_VERSION_MINOR)
-
-/* SPRT function IDs */
-
-#define SPRT_FID_VERSION U(0x0)
-#define SPRT_FID_PUT_RESPONSE U(0x1)
-#define SPRT_FID_YIELD U(0x5)
-#define SPRT_FID_PANIC U(0x7)
-#define SPRT_FID_MEMORY_PERM_ATTR_GET U(0xB)
-#define SPRT_FID_MEMORY_PERM_ATTR_SET U(0xC)
-
-#define SPRT_FID_MASK U(0xFF)
-
-/* Definitions to build the complete SMC ID */
-
-#define OEN_SPRT_START U(0x20)
-#define OEN_SPRT_END U(0x2F)
-
-#define SPRT_SMC_64(sprt_fid) ((OEN_SPRT_START << FUNCID_OEN_SHIFT) | \
- (U(1) << 31) | ((sprt_fid) & SPRT_FID_MASK) | \
- (SMC_64 << FUNCID_CC_SHIFT))
-#define SPRT_SMC_32(sprt_fid) ((OEN_SPRT_START << FUNCID_OEN_SHIFT) | \
- (U(1) << 31) | ((sprt_fid) & SPRT_FID_MASK) | \
- (SMC_32 << FUNCID_CC_SHIFT))
-
-/* Complete SMC IDs */
-
-#define SPRT_VERSION SPRT_SMC_32(SPRT_FID_VERSION)
-#define SPRT_PUT_RESPONSE_AARCH64 SPRT_SMC_64(SPRT_FID_PUT_RESPONSE)
-#define SPRT_YIELD_AARCH64 SPRT_SMC_64(SPRT_FID_YIELD)
-#define SPRT_PANIC_AARCH64 SPRT_SMC_64(SPRT_FID_PANIC)
-#define SPRT_MEMORY_PERM_ATTR_GET_AARCH64 SPRT_SMC_64(SPRT_FID_MEMORY_PERM_ATTR_GET)
-#define SPRT_MEMORY_PERM_ATTR_SET_AARCH64 SPRT_SMC_64(SPRT_FID_MEMORY_PERM_ATTR_SET)
-
-/* Defines used by SPRT_MEMORY_PERM_ATTR_{GET,SET}_AARCH64 */
-
-#define SPRT_MEMORY_PERM_ATTR_RO U(0)
-#define SPRT_MEMORY_PERM_ATTR_RW U(1)
-#define SPRT_MEMORY_PERM_ATTR_RO_EXEC U(2)
-/* U(3) is reserved */
-#define SPRT_MEMORY_PERM_ATTR_MASK U(3)
-#define SPRT_MEMORY_PERM_ATTR_SHIFT 3
-
-/* SPRT error codes. */
-
-#define SPRT_SUCCESS 0
-#define SPRT_NOT_SUPPORTED -1
-#define SPRT_INVALID_PARAMETER -2
-
-#endif /* SPRT_SVC_H */
diff --git a/include/runtime_services/trng.h b/include/runtime_services/trng.h
index a5d8e4d12..41600b485 100644
--- a/include/runtime_services/trng.h
+++ b/include/runtime_services/trng.h
@@ -42,12 +42,12 @@
#define SMC_TRNG_RND 0x84000053
#define TRNG_MAX_BITS U(96)
#define TRNG_ENTROPY_MASK U(0xFFFFFFFF)
-#endif
+#endif /* __aarch64__ */
/*
* Number of TRNG calls defined in the TRNG specification.
*/
-#define TRNG_NUM_CALLS 4
+#define TRNG_NUM_CALLS (4U)
#ifndef __ASSEMBLY__
typedef struct {
@@ -67,17 +67,16 @@ smc_ret_values tftf_trng_rnd(uint32_t nbits);
/*******************************************************************************
* TRNG version
******************************************************************************/
-#define TRNG_MAJOR_VER_SHIFT (16)
-#define TRNG_VERSION(major, minor) ((major << TRNG_MAJOR_VER_SHIFT) \
- | minor)
+#define TRNG_MAJOR_VER_SHIFT (16)
+#define TRNG_VERSION(major, minor) ((major << TRNG_MAJOR_VER_SHIFT) | minor)
/*******************************************************************************
* TRNG error codes
******************************************************************************/
-#define TRNG_E_SUCCESS (0)
-#define TRNG_E_NOT_SUPPORTED (-1)
-#define TRNG_E_INVALID_PARAMS (-2)
-#define TRNG_E_NO_ENTOPY (-3)
-#define TRNG_E_NOT_IMPLEMENTED (-4)
+#define TRNG_E_SUCCESS (0)
+#define TRNG_E_NOT_SUPPORTED (-1)
+#define TRNG_E_INVALID_PARAMS (-2)
+#define TRNG_E_NO_ENTROPY (-3)
+#define TRNG_E_NOT_IMPLEMENTED (-4)
#endif /* __TRNG_H__ */
diff --git a/lib/aarch64/exception_stubs.S b/lib/aarch64/exception_stubs.S
index d418451ae..b186e82b8 100644
--- a/lib/aarch64/exception_stubs.S
+++ b/lib/aarch64/exception_stubs.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,26 +12,21 @@
* Simplistic exceptions vector table.
* All entries spin, which means all types of exceptions are unrecoverable.
*/
- .macro vector_entry_spin name
- vector_entry \name
- b \name
- end_vector_entry \name
- .endm
vector_base exception_stubs
-vector_entry_spin SynchronousExceptionSP0
-vector_entry_spin IrqSP0
-vector_entry_spin FiqSP0
-vector_entry_spin SErrorSP0
-vector_entry_spin SynchronousExceptionSPx
-vector_entry_spin IrqSPx
-vector_entry_spin FiqSPx
-vector_entry_spin SErrorSPx
-vector_entry_spin SynchronousExceptionA64
-vector_entry_spin IrqA64
-vector_entry_spin FiqA64
-vector_entry_spin SErrorA64
-vector_entry_spin SynchronousExceptionA32
-vector_entry_spin IrqA32
-vector_entry_spin FiqA32
-vector_entry_spin SErrorA32
+vector_entry_spin sync_exception_sp_el0
+vector_entry_spin irq_sp_el0
+vector_entry_spin fiq_sp_el0
+vector_entry_spin serror_sp_el0
+vector_entry_spin sync_exception_sp_elx
+vector_entry_spin irq_sp_elx
+vector_entry_spin fiq_sp_elx
+vector_entry_spin serror_sp_elx
+vector_entry_spin sync_exception_aarch64
+vector_entry_spin irq_aarch64
+vector_entry_spin fiq_aarch64
+vector_entry_spin serror_aarch64
+vector_entry_spin sync_exception_aarch32
+vector_entry_spin irq_aarch32
+vector_entry_spin fiq_aarch32
+vector_entry_spin serror_aarch32
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index b67772152..2a2d633a5 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -197,9 +197,12 @@ func fixup_gdt_reloc
* Size of Elf64_Rela structure is 24 bytes.
*/
1:
+ /* Skip R_AARCH64_NONE entry with code 0 */
+ ldr x3, [x1, #8]
+ cbz x3, 2f
+
/* Assert that the relocation type is R_AARCH64_RELATIVE */
#if ENABLE_ASSERTIONS
- ldr x3, [x1, #8]
cmp x3, #0x403
ASM_ASSERT(eq)
#endif
diff --git a/lib/errata_abi/errata_abi.c b/lib/errata_abi/errata_abi.c
new file mode 100644
index 000000000..8f2c9e3e1
--- /dev/null
+++ b/lib/errata_abi/errata_abi.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errata_abi.h>
+#include <platform.h>
+#include <tftf_lib.h>
+
+const em_function_t em_functions[TOTAL_ABI_CALLS] = {
+ DEFINE_EM_FUNC(VERSION, true),
+ DEFINE_EM_FUNC(FEATURES, true),
+ DEFINE_EM_FUNC(CPU_ERRATUM_FEATURES, true),
+};
+
+int32_t tftf_em_abi_version(void)
+{
+ smc_args args = { EM_VERSION };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+bool tftf_em_abi_feature_implemented(uint32_t id)
+{
+ smc_args args = {
+ EM_FEATURES,
+ id,
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return (ret_vals.ret0 == EM_SUCCESS);
+}
+
+smc_ret_values tftf_em_abi_cpu_feature_implemented(uint32_t cpu_erratum,
+ uint32_t forward_flag)
+{
+ smc_args args = {
+ EM_CPU_ERRATUM_FEATURES,
+ cpu_erratum,
+ forward_flag
+ };
+ return tftf_smc(&args);
+}
diff --git a/lib/exceptions/aarch64/serror.c b/lib/exceptions/aarch64/serror.c
new file mode 100644
index 000000000..9c3571297
--- /dev/null
+++ b/lib/exceptions/aarch64/serror.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <serror.h>
+
+static exception_handler_t custom_serror_handler;
+
+void register_custom_serror_handler(exception_handler_t handler)
+{
+ custom_serror_handler = handler;
+}
+
+void unregister_custom_serror_handler(void)
+{
+ custom_serror_handler = NULL;
+}
+
+bool tftf_serror_handler(void)
+{
+ if (custom_serror_handler == NULL) {
+ return false;
+ }
+
+ return custom_serror_handler();
+}
diff --git a/lib/exceptions/aarch64/sync.c b/lib/exceptions/aarch64/sync.c
new file mode 100644
index 000000000..49b6bd8d9
--- /dev/null
+++ b/lib/exceptions/aarch64/sync.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <stdbool.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <sync.h>
+
+static exception_handler_t custom_sync_exception_handler;
+
+void register_custom_sync_exception_handler(exception_handler_t handler)
+{
+ custom_sync_exception_handler = handler;
+}
+
+void unregister_custom_sync_exception_handler(void)
+{
+ custom_sync_exception_handler = NULL;
+}
+
+bool tftf_sync_exception_handler(void)
+{
+ uint64_t elr_elx = IS_IN_EL2() ? read_elr_el2() : read_elr_el1();
+ bool resume = false;
+
+ if (custom_sync_exception_handler == NULL) {
+ return false;
+ }
+
+ resume = custom_sync_exception_handler();
+
+ if (resume) {
+ /* Move ELR to next instruction to allow tftf to continue */
+ if (IS_IN_EL2()) {
+ write_elr_el2(elr_elx + 4U);
+ } else {
+ write_elr_el1(elr_elx + 4U);
+ }
+ }
+
+ return resume;
+}
diff --git a/lib/irq/irq.c b/lib/exceptions/irq.c
index 70c321b19..70c321b19 100644
--- a/lib/irq/irq.c
+++ b/lib/exceptions/irq.c
diff --git a/lib/extensions/fpu/fpu.c b/lib/extensions/fpu/fpu.c
new file mode 100644
index 000000000..34cbafbfc
--- /dev/null
+++ b/lib/extensions/fpu/fpu.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <debug.h>
+#include <lib/extensions/fpu.h>
+
+#define __STR(x) #x
+#define STR(x) __STR(x)
+
+#define fill_simd_helper(num1, num2) "ldp q"#num1", q"#num2",\
+ [%0], #"STR(2 * FPU_Q_SIZE)";"
+#define read_simd_helper(num1, num2) "stp q"#num1", q"#num2",\
+ [%0], #"STR(2 * FPU_Q_SIZE)";"
+
+/* Read FPU Q[0-31] and store it in 'q_regs' */
+void fpu_q_regs_read(fpu_q_reg_t q_regs[FPU_Q_COUNT])
+{
+ __asm__ volatile(
+ read_simd_helper(0, 1)
+ read_simd_helper(2, 3)
+ read_simd_helper(4, 5)
+ read_simd_helper(6, 7)
+ read_simd_helper(8, 9)
+ read_simd_helper(10, 11)
+ read_simd_helper(12, 13)
+ read_simd_helper(14, 15)
+ read_simd_helper(16, 17)
+ read_simd_helper(18, 19)
+ read_simd_helper(20, 21)
+ read_simd_helper(22, 23)
+ read_simd_helper(24, 25)
+ read_simd_helper(26, 27)
+ read_simd_helper(28, 29)
+ read_simd_helper(30, 31)
+ "sub %0, %0, #" STR(FPU_Q_COUNT * FPU_Q_SIZE) ";"
+ : : "r" (q_regs));
+}
+
+/* Write FPU Q[0-31] registers passed in 'q_regs' */
+static void fpu_q_regs_write(const fpu_q_reg_t q_regs[FPU_Q_COUNT])
+{
+ __asm__ volatile(
+ fill_simd_helper(0, 1)
+ fill_simd_helper(2, 3)
+ fill_simd_helper(4, 5)
+ fill_simd_helper(6, 7)
+ fill_simd_helper(8, 9)
+ fill_simd_helper(10, 11)
+ fill_simd_helper(12, 13)
+ fill_simd_helper(14, 15)
+ fill_simd_helper(16, 17)
+ fill_simd_helper(18, 19)
+ fill_simd_helper(20, 21)
+ fill_simd_helper(22, 23)
+ fill_simd_helper(24, 25)
+ fill_simd_helper(26, 27)
+ fill_simd_helper(28, 29)
+ fill_simd_helper(30, 31)
+ "sub %0, %0, #" STR(FPU_Q_COUNT * FPU_Q_SIZE) ";"
+ : : "r" (q_regs));
+}
+
+/* Read FPCR and FPSR and store it in 'cs_regs' */
+void fpu_cs_regs_read(fpu_cs_regs_t *cs_regs)
+{
+ cs_regs->fpcr = read_fpcr();
+ cs_regs->fpsr = read_fpsr();
+}
+
+/* Write FPCR and FPSR passed in 'cs_regs' */
+void fpu_cs_regs_write(const fpu_cs_regs_t *cs_regs)
+{
+ write_fpcr(cs_regs->fpcr);
+ write_fpsr(cs_regs->fpsr);
+}
+
+/*
+ * Generate random values, store them in 'q_regs', then write them to the
+ * FPU Q registers.
+ */
+void fpu_q_regs_write_rand(fpu_q_reg_t q_regs[FPU_Q_COUNT])
+{
+ uint32_t rval;
+
+ rval = rand();
+
+ memset((void *)q_regs, 0, sizeof(fpu_q_reg_t) * FPU_Q_COUNT);
+ for (unsigned int num = 0U; num < FPU_Q_COUNT; num++) {
+ memset((uint8_t *)q_regs[num], rval * (num + 1),
+ sizeof(fpu_q_reg_t));
+ }
+ fpu_q_regs_write(q_regs);
+}
+
+/*
+ * Generate random values, store them in 'cs_regs', then write them to FPCR
+ * and FPSR.
+ */
+void fpu_cs_regs_write_rand(fpu_cs_regs_t *cs_regs)
+{
+ memset((void *)cs_regs, 0, sizeof(fpu_cs_regs_t));
+
+ cs_regs->fpcr = rand();
+ cs_regs->fpsr = rand();
+
+ /*
+ * Write random value to FPCR FPSR.
+ * Note write will be ignored for reserved bits.
+ */
+ fpu_cs_regs_write(cs_regs);
+
+ /* Read back current FPCR and FPSR */
+ fpu_cs_regs_read(cs_regs);
+}
+
+/*
+ * Generate random values, store them in 'fpu_state', then write them to the
+ * FPU Q registers, FPCR and FPSR.
+ */
+void fpu_state_write_rand(fpu_state_t *fpu_state)
+{
+ fpu_q_regs_write_rand(fpu_state->q_regs);
+ fpu_cs_regs_write_rand(&fpu_state->cs_regs);
+}
+
+/* Read the FPU Q registers, FPCR and FPSR, and store them in 'fpu_state' */
+void fpu_state_read(fpu_state_t *fpu_state)
+{
+ fpu_q_regs_read(fpu_state->q_regs);
+ fpu_cs_regs_read(&fpu_state->cs_regs);
+}
+
+/* Return zero if the FPU Q registers in 's1' and 's2' match, else nonzero */
+int fpu_q_regs_compare(const fpu_q_reg_t s1[FPU_Q_COUNT],
+ const fpu_q_reg_t s2[FPU_Q_COUNT])
+{
+ return memcmp(s1, s2, sizeof(fpu_q_reg_t) * FPU_Q_COUNT);
+}
+
+/*
+ * Return zero if the FPU control and status registers in 's1' and 's2' match,
+ * else nonzero
+ */
+int fpu_cs_regs_compare(const fpu_cs_regs_t *s1, const fpu_cs_regs_t *s2)
+{
+ return memcmp(s1, s2, sizeof(fpu_cs_regs_t));
+}
+
+/* Return zero if the FPU states 's1' and 's2' match, else non-zero */
+int fpu_state_compare(const fpu_state_t *s1, const fpu_state_t *s2)
+{
+ if (fpu_q_regs_compare(s1->q_regs, s2->q_regs) != 0) {
+ return 1;
+ }
+
+ if (fpu_cs_regs_compare(&s1->cs_regs, &s2->cs_regs) != 0) {
+ return 1;
+ }
+
+ return 0;
+}
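A minimal sketch of how these helpers are typically combined to check that FP/SIMD state
survives an operation under test (the operation itself is left as a placeholder):

        static bool fpu_state_preserved(void)
        {
                static fpu_state_t before, after;

                /* Program random values into Q0-Q31, FPCR and FPSR. */
                fpu_state_write_rand(&before);

                /* ... run the SMC / world switch / operation under test here ... */

                fpu_state_read(&after);

                /* Zero means every Q register, FPCR and FPSR still match. */
                return (fpu_state_compare(&before, &after) == 0);
        }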
diff --git a/lib/extensions/pauth/aarch64/pauth.c b/lib/extensions/pauth/aarch64/pauth.c
index 03de468b4..9608b97d9 100644
--- a/lib/extensions/pauth/aarch64/pauth.c
+++ b/lib/extensions/pauth/aarch64/pauth.c
@@ -6,7 +6,10 @@
#include <arch_helpers.h>
#include <cdefs.h>
+#include <stdbool.h>
#include <stdint.h>
+#include <debug.h>
+#include <pauth.h>
/*
* This is only a toy implementation to generate a seemingly random
@@ -24,3 +27,151 @@ uint128_t init_apkey(void)
return ((uint128_t)(key_hi) << 64) | key_lo;
}
+
+/* Check if ARMv8.3-PAuth key is enabled */
+static bool is_pauth_key_enabled(uint64_t key_bit)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return ((read_sctlr_el1() & key_bit) != 0U);
+ } else if (el == 2U) {
+ return ((read_sctlr_el2() & key_bit) != 0U);
+ }
+ return false;
+}
+
+bool pauth_test_lib_compare_template(uint128_t *pauth_keys_before, uint128_t *pauth_keys_after)
+{
+ bool result = true;
+
+ pauth_test_lib_read_keys(pauth_keys_after);
+ for (unsigned int i = 0U; i < NUM_KEYS; ++i) {
+ if (pauth_keys_before[i] != pauth_keys_after[i]) {
+ ERROR("AP%sKey_EL1 read 0x%llx:%llx "
+ "expected 0x%llx:%llx\n", key_name[i],
+ (uint64_t)(pauth_keys_after[i] >> 64U),
+ (uint64_t)(pauth_keys_after[i]),
+ (uint64_t)(pauth_keys_before[i] >> 64U),
+ (uint64_t)(pauth_keys_before[i]));
+
+ result = false;
+ }
+ }
+ return result;
+}
+
+/*
+ * Program or read ARMv8.3-PAuth keys (if already enabled)
+ * and store them in <pauth_keys_before> buffer
+ */
+void pauth_test_lib_fill_regs_and_template(uint128_t *pauth_keys_before)
+{
+ uint128_t plat_key;
+
+ (void)memset(pauth_keys_before, 0, NUM_KEYS * sizeof(uint128_t));
+
+ if (is_pauth_key_enabled(SCTLR_EnIA_BIT)) {
+ /* Read APIAKey_EL1 */
+ plat_key = read_apiakeylo_el1() |
+ ((uint128_t)(read_apiakeyhi_el1()) << 64U);
+ INFO("EnIA is set\n");
+ } else {
+ /* Program APIAKey_EL1 */
+ plat_key = init_apkey();
+ write_apiakeylo_el1((uint64_t)plat_key);
+ write_apiakeyhi_el1((uint64_t)(plat_key >> 64U));
+ }
+ pauth_keys_before[0] = plat_key;
+
+ if (is_pauth_key_enabled(SCTLR_EnIB_BIT)) {
+ /* Read APIBKey_EL1 */
+ plat_key = read_apibkeylo_el1() |
+ ((uint128_t)(read_apibkeyhi_el1()) << 64U);
+ INFO("EnIB is set\n");
+ } else {
+ /* Program APIBKey_EL1 */
+ plat_key = init_apkey();
+ write_apibkeylo_el1((uint64_t)plat_key);
+ write_apibkeyhi_el1((uint64_t)(plat_key >> 64U));
+ }
+ pauth_keys_before[1] = plat_key;
+
+ if (is_pauth_key_enabled(SCTLR_EnDA_BIT)) {
+ /* Read APDAKey_EL1 */
+ plat_key = read_apdakeylo_el1() |
+ ((uint128_t)(read_apdakeyhi_el1()) << 64U);
+ INFO("EnDA is set\n");
+ } else {
+ /* Program APDAKey_EL1 */
+ plat_key = init_apkey();
+ write_apdakeylo_el1((uint64_t)plat_key);
+ write_apdakeyhi_el1((uint64_t)(plat_key >> 64U));
+ }
+ pauth_keys_before[2] = plat_key;
+
+ if (is_pauth_key_enabled(SCTLR_EnDB_BIT)) {
+ /* Read APDBKey_EL1 */
+ plat_key = read_apdbkeylo_el1() |
+ ((uint128_t)(read_apdbkeyhi_el1()) << 64U);
+ INFO("EnDB is set\n");
+ } else {
+ /* Program APDBKey_EL1 */
+ plat_key = init_apkey();
+ write_apdbkeylo_el1((uint64_t)plat_key);
+ write_apdbkeyhi_el1((uint64_t)(plat_key >> 64U));
+ }
+ pauth_keys_before[3] = plat_key;
+
+ pauth_keys_before[4] = read_apgakeylo_el1() |
+ ((uint128_t)(read_apgakeyhi_el1()) << 64U);
+ if (pauth_keys_before[4] == 0ULL) {
+ /* Program APGAKey_EL1 */
+ plat_key = init_apkey();
+ write_apgakeylo_el1((uint64_t)plat_key);
+ write_apgakeyhi_el1((uint64_t)(plat_key >> 64U));
+ pauth_keys_before[4] = plat_key;
+ }
+
+ isb();
+}
+
+/*
+ * Read ARMv8.3-PAuth keys and store them in
+ * <pauth_keys_arr> buffer
+ */
+void pauth_test_lib_read_keys(uint128_t *pauth_keys_arr)
+{
+ (void)memset(pauth_keys_arr, 0, NUM_KEYS * sizeof(uint128_t));
+
+ /* Read APIAKey_EL1 */
+ pauth_keys_arr[0] = read_apiakeylo_el1() |
+ ((uint128_t)(read_apiakeyhi_el1()) << 64U);
+
+ /* Read APIBKey_EL1 */
+ pauth_keys_arr[1] = read_apibkeylo_el1() |
+ ((uint128_t)(read_apibkeyhi_el1()) << 64U);
+
+ /* Read APDAKey_EL1 */
+ pauth_keys_arr[2] = read_apdakeylo_el1() |
+ ((uint128_t)(read_apdakeyhi_el1()) << 64U);
+
+ /* Read APDBKey_EL1 */
+ pauth_keys_arr[3] = read_apdbkeylo_el1() |
+ ((uint128_t)(read_apdbkeyhi_el1()) << 64U);
+
+ /* Read APGAKey_EL1 */
+ pauth_keys_arr[4] = read_apgakeylo_el1() |
+ ((uint128_t)(read_apgakeyhi_el1()) << 64U);
+}
+
+/* Test execution of ARMv8.3-PAuth instructions */
+void pauth_test_lib_test_intrs(void)
+{
+ /* Pointer authentication instructions */
+ __asm__ volatile (
+ "paciasp\n"
+ "autiasp\n"
+ "paciasp\n"
+ "xpaclri");
+}
diff --git a/lib/extensions/sme/aarch64/sme.c b/lib/extensions/sme/aarch64/sme.c
new file mode 100644
index 000000000..ee21578b3
--- /dev/null
+++ b/lib/extensions/sme/aarch64/sme.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <lib/extensions/sme.h>
+
+/*
+ * Function: sme_smstart
+ * This function enables streaming mode and ZA array storage access
+ * independently or together based on the type of instruction variant.
+ *
+ * Parameters
+ * smstart_type: If SMSTART, streaming mode and ZA access are both enabled.
+ * If SMSTART_SM, only streaming mode is enabled.
+ * If SMSTART_ZA, access to the SME ZA storage (and ZT0 storage,
+ * where present) is enabled.
+ */
+void sme_smstart(smestart_instruction_type_t smstart_type)
+{
+ u_register_t svcr = 0ULL;
+
+ switch (smstart_type) {
+ case SMSTART:
+ svcr = (SVCR_SM_BIT | SVCR_ZA_BIT);
+ break;
+
+ case SMSTART_SM:
+ svcr = SVCR_SM_BIT;
+ break;
+
+ case SMSTART_ZA:
+ svcr = SVCR_ZA_BIT;
+ break;
+
+ default:
+ ERROR("Illegal SMSTART Instruction Variant\n");
+ break;
+ }
+ write_svcr(read_svcr() | svcr);
+
+ isb();
+}
+
+/*
+ * sme_smstop
+ * This function exits streaming mode and disables ZA array storage access
+ * independently or together based on the type of instruction variant.
+ *
+ * Parameters
+ * smstop_type: If SMSTOP, exits streaming mode and disables ZA access.
+ * If SMSTOP_SM, only exits streaming mode.
+ * If SMSTOP_ZA, disables access to the SME ZA storage (and ZT0
+ * storage).
+ */
+void sme_smstop(smestop_instruction_type_t smstop_type)
+{
+ u_register_t svcr = 0ULL;
+
+ switch (smstop_type) {
+ case SMSTOP:
+ svcr = (~SVCR_SM_BIT) & (~SVCR_ZA_BIT);
+ break;
+
+ case SMSTOP_SM:
+ svcr = ~SVCR_SM_BIT;
+ break;
+
+ case SMSTOP_ZA:
+ svcr = ~SVCR_ZA_BIT;
+ break;
+
+ default:
+ ERROR("Illegal SMSTOP Instruction Variant\n");
+ break;
+ }
+ write_svcr(read_svcr() & svcr);
+
+ isb();
+}
+
+/* Set the Streaming SVE vector length (SVL) in the SMCR_EL2 register */
+void sme_config_svq(uint32_t svq)
+{
+ u_register_t smcr_el2_val;
+
+ /* cap svq to arch supported max value */
+ if (svq > SME_SVQ_ARCH_MAX) {
+ svq = SME_SVQ_ARCH_MAX;
+ }
+
+ smcr_el2_val = read_smcr_el2();
+
+ smcr_el2_val &= ~(MASK(SMCR_ELX_LEN));
+ smcr_el2_val |= INPLACE(SMCR_ELX_LEN, svq);
+
+ write_smcr_el2(smcr_el2_val);
+ isb();
+}
+
+static void set_smcr_fa64(bool enable)
+{
+ if (enable) {
+ write_smcr_el2(read_smcr_el2() | SMCR_ELX_FA64_BIT);
+ } else {
+ write_smcr_el2(read_smcr_el2() & ~SMCR_ELX_FA64_BIT);
+ }
+
+ isb();
+}
+
+/*
+ * Enable FEAT_SME_FA64. This control causes all implemented A64 instructions
+ * to be treated as legal in Streaming SVE mode at EL2, if they are treated as
+ * legal at EL3.
+ */
+void sme_enable_fa64(void)
+{
+ set_smcr_fa64(true);
+}
+
+/*
+ * Disable FEAT_SME_FA64. This control does not cause any instruction to be
+ * treated as legal in Streaming SVE mode.
+ */
+void sme_disable_fa64(void)
+{
+ set_smcr_fa64(false);
+}
+
+/* Returns 'true' if the CPU is in Streaming SVE mode */
+bool sme_smstat_sm(void)
+{
+ return ((read_svcr() & SVCR_SM_BIT) != 0U);
+}
+
+bool sme_feat_fa64_enabled(void)
+{
+ return ((read_smcr_el2() & SMCR_ELX_FA64_BIT) != 0U);
+}
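A minimal sketch of driving these helpers, assuming SME has already been enabled for the
current EL (for example by EL3 firmware):

        static void sme_streaming_mode_smoke(void)
        {
                /* Enter Streaming SVE mode only (leave ZA untouched). */
                sme_smstart(SMSTART_SM);

                if (sme_smstat_sm()) {
                        /* PSTATE.SM is set: streaming-mode code may run here. */
                }

                /* Leave Streaming SVE mode again. */
                sme_smstop(SMSTOP_SM);
        }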
diff --git a/lib/extensions/sme/aarch64/sme2_helpers.S b/lib/extensions/sme/aarch64/sme2_helpers.S
new file mode 100644
index 000000000..e5925e08f
--- /dev/null
+++ b/lib/extensions/sme/aarch64/sme2_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <arch.h>
+
+ .arch armv8-a+sve
+ .globl sme2_load_zt0_instruction
+ .globl sme2_store_zt0_instruction
+
+/*
+ * TODO: The toolchain does not yet support SME2 instructions or intrinsics,
+ * so the instructions are encoded manually using their opcodes. Once the
+ * toolchain gains the required support, these macros can be refactored.
+ */
+.macro _check_general_reg_number nr
+ .if ((\nr) < 0) || ((\nr) > 30)
+ .error "Bad register number \nr."
+ .endif
+.endm
+
+/*
+ * LDR (ZT0) : Load ZT0 register with 64byte data.
+ * Instruction: LDR ZT0, [<Xn|SP>]
+ *
+ * LDR ZT0, nx
+ * Opcode bit field:
+ * nx : 64-bit name of the general-purpose base register
+ */
+.macro _ldr_zt nx
+ _check_general_reg_number \nx
+ .inst 0xe11f8000 | (((\nx) & 0x1f) << 5)
+.endm
+
+/*
+ * STR (ZT0) : Store the 64-byte ZT0 register to the memory address
+ * provided in the 64-bit base register or stack pointer.
+ * Instruction: STR ZT0, [<Xn|SP>]
+ *
+ * STR ZT0, nx
+ * Opcode bit field:
+ * nx : 64-bit name of the general-purpose base register
+ */
+.macro _str_zt nx
+ _check_general_reg_number \nx
+ .inst 0xe13f8000 | (((\nx) & 0x1f) << 5)
+.endm
+
+/*
+ * void sme2_load_zt0_instruction;
+ *
+ * This function loads data from the input buffer pointed
+ * to by the X0 register into the 512-bit ZT0 register.
+ */
+func sme2_load_zt0_instruction
+ _ldr_zt 0
+ ret
+endfunc sme2_load_zt0_instruction
+
+/*
+ * void sme2_store_zt0_instruction;
+ *
+ * This function stores data from the SME2 ZT0 register
+ * into the memory buffer pointed to by the X0 register.
+ * It copies 512 bits of data into that buffer.
+ */
+func sme2_store_zt0_instruction
+ _str_zt 0
+ ret
+endfunc sme2_store_zt0_instruction
diff --git a/lib/extensions/sme/aarch64/sme_helpers.S b/lib/extensions/sme/aarch64/sme_helpers.S
new file mode 100644
index 000000000..af6c1b34b
--- /dev/null
+++ b/lib/extensions/sme/aarch64/sme_helpers.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2021-2023, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <assert_macros.S>
+
+ .arch armv8-a+sve
+ .globl sme_rdsvl_1
+ .globl sme_try_illegal_instruction
+ .globl sme_vector_to_ZA
+ .globl sme_ZA_to_vector
+
+
+/*
+ * TODO: The toolchain does not yet support SME instructions or intrinsics,
+ * so the instructions are encoded manually using their opcodes. Once the
+ * toolchain gains the required support, these macros can be refactored.
+ */
+
+
+/*
+ * LDR (loads a vector, i.e. an array of elements, from memory into the ZA array):
+ * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ *
+ * Arguments/Opcode bit field:
+ * nw : the vector select register W12-W15
+ * nxbase : 64-bit name of the general-purpose base register.
+ * offset : vector select and optional memory offset. Defaults to 0.
+ */
+.macro _ldr_za nw, nxbase, offset=0
+ .inst 0xe1000000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 0xf)
+.endm
+
+/*
+ * STR (stores a vector, i.e. an array of elements, from the ZA array to memory):
+ * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ *
+ * Arguments/Opcode bit field:
+ * nw : the vector select register W12-W15
+ * nxbase : 64-bit name of the general-purpose base register.
+ * offset : vector select and optional memory offset. Defaults to 0.
+ */
+.macro _str_za nw, nxbase, offset=0
+ .inst 0xe1200000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 0xf)
+.endm
+
+/*
+ * RDSVL - Read multiple of Streaming SVE vector register size to scalar register
+ * RDSVL <Xd>, #<imm>
+ *
+ * Arguments/Opcode bit field:
+ * Xd : 64-bit name of the general-purpose base register.
+ * imm : signed immediate operand (imm6)
+ */
+.macro _sme_rdsvl xd, imm
+ .inst 0x04bf5800 \
+ | (((\imm) & 0x3f) << 5) \
+ | (\xd)
+.endm
+
+/*
+ * uint64_t sme_rdsvl_1(void);
+ *
+ * Run rdsvl instruction with imm #1.
+ */
+func sme_rdsvl_1
+ _sme_rdsvl 0, 1
+ ret
+endfunc sme_rdsvl_1
+
+/*
+ * void sme_try_illegal_instruction(void);
+ *
+ * This function tests that illegal instructions are allowed to run when
+ * FA64 is supported. RDFFR is explicitly stated to be illegal in the SME
+ * specification section F1.1.2 unless FA64 is supported and enabled.
+ */
+func sme_try_illegal_instruction
+ rdffr p0.b
+ ret
+endfunc sme_try_illegal_instruction
+
+
+/**
+ * void sme_vector_to_ZA(uint64_t *input_vec)
+ *
+ * This function loads a vector of elements into the ZA array storage
+ */
+func sme_vector_to_ZA
+ mov w12, wzr
+ _ldr_za 12, 0 // ZA.H[W12] loaded from [X0 / input_vector]
+ ret
+endfunc sme_vector_to_ZA
+
+/**
+ * void sme_ZA_to_vector(uint64_t *out_vec)
+ *
+ * This function stores a vector of elements from the ZA array storage to memory
+ */
+func sme_ZA_to_vector
+ mov w12, wzr
+ _str_za 12, 0 // ZA.H[W12] stored to [X0 / out_vector]
+ ret
+endfunc sme_ZA_to_vector
diff --git a/lib/extensions/sve/aarch64/sve.c b/lib/extensions/sve/aarch64/sve.c
new file mode 100644
index 000000000..2c0e38f00
--- /dev/null
+++ b/lib/extensions/sve/aarch64/sve.c
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <lib/extensions/fpu.h>
+#include <lib/extensions/sve.h>
+#include <tftf_lib.h>
+
+static uint8_t zero_mem[512];
+
+#define sve_traps_save_disable(flags) \
+ do { \
+ if (IS_IN_EL2()) { \
+ flags = read_cptr_el2(); \
+ write_cptr_el2(flags & ~(CPTR_EL2_TZ_BIT)); \
+ } else { \
+ flags = read_cpacr_el1(); \
+ write_cpacr_el1(flags | \
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));\
+ } \
+ isb(); \
+ } while (false)
+
+#define sve_traps_restore(flags) \
+ do { \
+ if (IS_IN_EL2()) { \
+ write_cptr_el2(flags); \
+ } else { \
+ write_cpacr_el1(flags); \
+ } \
+ isb(); \
+ } while (false)
+
+static void config_vq(uint8_t sve_vq)
+{
+ u_register_t zcr_elx;
+
+ if (IS_IN_EL2()) {
+ zcr_elx = read_zcr_el2();
+ zcr_elx &= ~(MASK(ZCR_EL2_SVE_VL));
+ zcr_elx |= INPLACE(ZCR_EL2_SVE_VL, sve_vq);
+ write_zcr_el2(zcr_elx);
+ } else {
+ zcr_elx = read_zcr_el1();
+ zcr_elx &= ~(MASK(ZCR_EL1_SVE_VL));
+ zcr_elx |= INPLACE(ZCR_EL1_SVE_VL, sve_vq);
+ write_zcr_el1(zcr_elx);
+ }
+ isb();
+}
+
+/* Returns the SVE implemented VL in bytes (constrained by ZCR_EL3.LEN) */
+uint64_t sve_rdvl_1(void)
+{
+ uint64_t vl;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ "rdvl %0, #1;"
+ ".arch_extension nosve\n"
+ : "=r" (vl)
+ );
+
+ sve_traps_restore(flags);
+ return vl;
+}
+
+uint64_t sve_read_zcr_elx(void)
+{
+ unsigned long flags;
+ uint64_t rval;
+
+ sve_traps_save_disable(flags);
+
+ if (IS_IN_EL2()) {
+ rval = read_zcr_el2();
+ } else {
+ rval = read_zcr_el1();
+ }
+
+ sve_traps_restore(flags);
+
+ return rval;
+}
+
+void sve_write_zcr_elx(uint64_t rval)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ if (IS_IN_EL2()) {
+ write_zcr_el2(rval);
+ } else {
+ write_zcr_el1(rval);
+ }
+ isb();
+
+ sve_traps_restore(flags);
+
+ return;
+}
+
+/* Set the SVE vector length in the current EL's ZCR_ELx register */
+void sve_config_vq(uint8_t sve_vq)
+{
+ unsigned long flags;
+
+ assert(is_armv8_2_sve_present());
+ sve_traps_save_disable(flags);
+
+ /* cap vq to arch supported max value */
+ if (sve_vq > SVE_VQ_ARCH_MAX) {
+ sve_vq = SVE_VQ_ARCH_MAX;
+ }
+
+ config_vq(sve_vq);
+
+ sve_traps_restore(flags);
+}
+
+/*
+ * Probes all valid vector lengths up to 'sve_max_vq'. Configures ZCR_ELx with
+ * each value from 0 to 'sve_max_vq' and, for each step, calls sve_rdvl to get
+ * the resulting vector length. The vector length is converted to VQ and the
+ * bit corresponding to that VQ is set.
+ * Returns:
+ * a bitmap with one bit set for each supported VL
+ */
+uint32_t sve_probe_vl(uint8_t sve_max_vq)
+{
+ uint32_t vl_bitmap = 0;
+ uint8_t vq, rdvl_vq;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ /* cap vq to arch supported max value */
+ if (sve_max_vq > SVE_VQ_ARCH_MAX) {
+ sve_max_vq = SVE_VQ_ARCH_MAX;
+ }
+
+ for (vq = 0; vq <= sve_max_vq; vq++) {
+ config_vq(vq);
+ rdvl_vq = SVE_VL_TO_VQ(sve_rdvl_1());
+ if (vl_bitmap & BIT_32(rdvl_vq)) {
+ continue;
+ }
+ vl_bitmap |= BIT_32(rdvl_vq);
+ }
+
+ sve_traps_restore(flags);
+
+ return vl_bitmap;
+}
+
+/*
+ * Write SVE Z[0-31] registers passed in 'z_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+static void z_regs_write(const sve_z_regs_t *z_regs)
+{
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ fill_sve_helper(0)
+ fill_sve_helper(1)
+ fill_sve_helper(2)
+ fill_sve_helper(3)
+ fill_sve_helper(4)
+ fill_sve_helper(5)
+ fill_sve_helper(6)
+ fill_sve_helper(7)
+ fill_sve_helper(8)
+ fill_sve_helper(9)
+ fill_sve_helper(10)
+ fill_sve_helper(11)
+ fill_sve_helper(12)
+ fill_sve_helper(13)
+ fill_sve_helper(14)
+ fill_sve_helper(15)
+ fill_sve_helper(16)
+ fill_sve_helper(17)
+ fill_sve_helper(18)
+ fill_sve_helper(19)
+ fill_sve_helper(20)
+ fill_sve_helper(21)
+ fill_sve_helper(22)
+ fill_sve_helper(23)
+ fill_sve_helper(24)
+ fill_sve_helper(25)
+ fill_sve_helper(26)
+ fill_sve_helper(27)
+ fill_sve_helper(28)
+ fill_sve_helper(29)
+ fill_sve_helper(30)
+ fill_sve_helper(31)
+ ".arch_extension nosve\n"
+ : : "r" (z_regs));
+}
+
+/*
+ * Write SVE Z[0-31] registers passed in 'z_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+void sve_z_regs_write(const sve_z_regs_t *z_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ z_regs_write(z_regs);
+ sve_traps_restore(flags);
+}
+
+/*
+ * Read SVE Z[0-31] registers and store them in 'z_regs' for Normal SVE or
+ * Streaming SVE mode
+ */
+void sve_z_regs_read(sve_z_regs_t *z_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ read_sve_helper(0)
+ read_sve_helper(1)
+ read_sve_helper(2)
+ read_sve_helper(3)
+ read_sve_helper(4)
+ read_sve_helper(5)
+ read_sve_helper(6)
+ read_sve_helper(7)
+ read_sve_helper(8)
+ read_sve_helper(9)
+ read_sve_helper(10)
+ read_sve_helper(11)
+ read_sve_helper(12)
+ read_sve_helper(13)
+ read_sve_helper(14)
+ read_sve_helper(15)
+ read_sve_helper(16)
+ read_sve_helper(17)
+ read_sve_helper(18)
+ read_sve_helper(19)
+ read_sve_helper(20)
+ read_sve_helper(21)
+ read_sve_helper(22)
+ read_sve_helper(23)
+ read_sve_helper(24)
+ read_sve_helper(25)
+ read_sve_helper(26)
+ read_sve_helper(27)
+ read_sve_helper(28)
+ read_sve_helper(29)
+ read_sve_helper(30)
+ read_sve_helper(31)
+ ".arch_extension nosve\n"
+ : : "r" (z_regs));
+
+ sve_traps_restore(flags);
+}
+
+static void p_regs_write(const sve_p_regs_t *p_regs)
+{
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ fill_sve_p_helper(0)
+ fill_sve_p_helper(1)
+ fill_sve_p_helper(2)
+ fill_sve_p_helper(3)
+ fill_sve_p_helper(4)
+ fill_sve_p_helper(5)
+ fill_sve_p_helper(6)
+ fill_sve_p_helper(7)
+ fill_sve_p_helper(8)
+ fill_sve_p_helper(9)
+ fill_sve_p_helper(10)
+ fill_sve_p_helper(11)
+ fill_sve_p_helper(12)
+ fill_sve_p_helper(13)
+ fill_sve_p_helper(14)
+ fill_sve_p_helper(15)
+ ".arch_extension nosve\n"
+ : : "r" (p_regs));
+}
+
+/*
+ * Write SVE P[0-15] registers passed in 'p_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+void sve_p_regs_write(const sve_p_regs_t *p_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ p_regs_write(p_regs);
+ sve_traps_restore(flags);
+}
+
+/*
+ * Read SVE P[0-15] registers and store them in 'p_regs' for Normal SVE or
+ * Streaming SVE mode
+ */
+void sve_p_regs_read(sve_p_regs_t *p_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ read_sve_p_helper(0)
+ read_sve_p_helper(1)
+ read_sve_p_helper(2)
+ read_sve_p_helper(3)
+ read_sve_p_helper(4)
+ read_sve_p_helper(5)
+ read_sve_p_helper(6)
+ read_sve_p_helper(7)
+ read_sve_p_helper(8)
+ read_sve_p_helper(9)
+ read_sve_p_helper(10)
+ read_sve_p_helper(11)
+ read_sve_p_helper(12)
+ read_sve_p_helper(13)
+ read_sve_p_helper(14)
+ read_sve_p_helper(15)
+ ".arch_extension nosve\n"
+ : : "r" (p_regs));
+
+ sve_traps_restore(flags);
+}
+
+static void ffr_regs_write(const sve_ffr_regs_t *ffr_regs)
+{
+ uint8_t sve_p_reg[SVE_P_REG_LEN_BYTES];
+
+ /* Save p0. Load 'ffr_regs' to p0 and write FFR. Restore p0 */
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ " str p0, [%1]\n"
+ " ldr p0, [%0]\n"
+ " wrffr p0.B\n"
+ " ldr p0, [%1]\n"
+ ".arch_extension nosve\n"
+ :
+ : "r" (ffr_regs), "r" (sve_p_reg)
+ : "memory");
+}
+
+/*
+ * Write SVE FFR registers passed in 'ffr_regs' for Normal SVE or Streaming SVE
+ * mode
+ */
+void sve_ffr_regs_write(const sve_ffr_regs_t *ffr_regs)
+{
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+ ffr_regs_write(ffr_regs);
+ sve_traps_restore(flags);
+}
+
+/*
+ * Read the SVE FFR register and store it in 'ffr_regs' for Normal SVE or Streaming
+ * SVE mode
+ */
+void sve_ffr_regs_read(sve_ffr_regs_t *ffr_regs)
+{
+ uint8_t sve_p_reg[SVE_P_REG_LEN_BYTES];
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ /* Save p0. Read FFR to p0 and save p0 (ffr) to 'ffr_regs'. Restore p0 */
+ __asm__ volatile(
+ ".arch_extension sve\n"
+ " str p0, [%1]\n"
+ " rdffr p0.B\n"
+ " str p0, [%0]\n"
+ " ldr p0, [%1]\n"
+ ".arch_extension nosve\n"
+ :
+ : "r" (ffr_regs), "r" (sve_p_reg)
+ : "memory");
+
+ sve_traps_restore(flags);
+}
+
+/*
+ * Generate random values, store them in 'z_regs', then write them to the SVE
+ * Z registers for Normal SVE or Streaming SVE mode.
+ */
+void sve_z_regs_write_rand(sve_z_regs_t *z_regs)
+{
+ uint32_t rval;
+ uint32_t z_size;
+ uint8_t *z_reg;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ z_size = (uint32_t)sve_rdvl_1();
+
+ /* Write Z regs */
+ rval = rand();
+ memset((void *)z_regs, 0, sizeof(sve_z_regs_t));
+ for (uint32_t i = 0U; i < SVE_NUM_VECTORS; i++) {
+ z_reg = (uint8_t *)z_regs + (i * z_size);
+
+ memset((void *)z_reg, rval * (i + 1), z_size);
+ }
+ z_regs_write(z_regs);
+
+ sve_traps_restore(flags);
+}
+
+/*
+ * Generate random values, store them in 'p_regs', then write them to the SVE
+ * P registers for Normal SVE or Streaming SVE mode.
+ */
+void sve_p_regs_write_rand(sve_p_regs_t *p_regs)
+{
+ uint32_t p_size;
+ uint8_t *p_reg;
+ uint32_t rval;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ p_size = (uint32_t)sve_rdvl_1() / 8;
+
+ /* Write P regs */
+ rval = rand();
+ memset((void *)p_regs, 0, sizeof(sve_p_regs_t));
+ for (uint32_t i = 0U; i < SVE_NUM_P_REGS; i++) {
+ p_reg = (uint8_t *)p_regs + (i * p_size);
+
+ memset((void *)p_reg, rval * (i + 1), p_size);
+ }
+ p_regs_write(p_regs);
+
+ sve_traps_restore(flags);
+}
+
+/*
+ * Generate random values, store them in 'ffr_regs', then write them to the
+ * SVE FFR register for Normal SVE or Streaming SVE mode.
+ */
+void sve_ffr_regs_write_rand(sve_ffr_regs_t *ffr_regs)
+{
+ uint32_t ffr_size;
+ uint8_t *ffr_reg;
+ uint32_t rval;
+ unsigned long flags;
+
+ sve_traps_save_disable(flags);
+
+ ffr_size = (uint32_t)sve_rdvl_1() / 8;
+
+ rval = rand();
+ memset((void *)ffr_regs, 0, sizeof(sve_ffr_regs_t));
+ for (uint32_t i = 0U; i < SVE_NUM_FFR_REGS; i++) {
+ ffr_reg = (uint8_t *)ffr_regs + (i * ffr_size);
+
+ memset((void *)ffr_reg, rval * (i + 1), ffr_size);
+ }
+ ffr_regs_write(ffr_regs);
+
+ sve_traps_restore(flags);
+}
+
+/*
+ * Compare Z registers passed in 's1' (old values) with 's2' (new values).
+ * This routine works for Normal SVE or Streaming SVE mode.
+ *
+ * Returns:
+ * 0 : All Z[0-31] registers in 's1' and 's2' are equal
+ * nonzero : the Nth bit is set for each Z register that differs
+ */
+uint64_t sve_z_regs_compare(const sve_z_regs_t *s1, const sve_z_regs_t *s2)
+{
+ uint32_t z_size;
+ uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
+
+ /*
+ * 'rdvl' returns Streaming SVE VL if PSTATE.SM=1 else returns normal
+ * SVE VL
+ */
+ z_size = (uint32_t)sve_rdvl_1();
+
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
+ for (uint32_t i = 0U; i < SVE_NUM_VECTORS; i++) {
+ uint8_t *s1_z = (uint8_t *)s1 + (i * z_size);
+ uint8_t *s2_z = (uint8_t *)s2 + (i * z_size);
+
+ /*
+ * For Z register the comparison is successful when
+ * 1. whole Z register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the lower 128 bits of 's1' and 's2' is
+ * equal and remaining upper bits of 's2' is zero
+ */
+ if ((memcmp(s1_z, s2_z, z_size) == 0) ||
+ (sve_hint && (z_size > FPU_Q_SIZE) &&
+ (memcmp(s1_z, s2_z, FPU_Q_SIZE) == 0) &&
+ (memcmp(s2_z + FPU_Q_SIZE, zero_mem,
+ z_size - FPU_Q_SIZE) == 0))) {
+ continue;
+ }
+
+ cmp_bitmap |= BIT_64(i);
+ VERBOSE("SVE Z_%u mismatch\n", i);
+ }
+
+ return cmp_bitmap;
+}
+
+/*
+ * Compare P registers passed in 's1' (old values) with 's2' (new values).
+ * This routine works for Normal SVE or Streaming SVE mode.
+ *
+ * Returns:
+ * 0 : All P[0-15] registers in 's1' and 's2' are equal
+ * nonzero : the Nth bit is set for each P register that differs
+ */
+uint64_t sve_p_regs_compare(const sve_p_regs_t *s1, const sve_p_regs_t *s2)
+{
+ uint32_t p_size;
+ uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
+
+ /* Size of one predicate register 1/8 of Z register */
+ p_size = (uint32_t)sve_rdvl_1() / 8U;
+
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
+ for (uint32_t i = 0U; i < SVE_NUM_P_REGS; i++) {
+ uint8_t *s1_p = (uint8_t *)s1 + (i * p_size);
+ uint8_t *s2_p = (uint8_t *)s2 + (i * p_size);
+
+ /*
+ * For P register the comparison is successful when
+ * 1. whole P register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the P register of 's2' is zero
+ */
+ if ((memcmp(s1_p, s2_p, p_size) == 0) ||
+ (sve_hint && (memcmp(s2_p, zero_mem, p_size) == 0))) {
+ continue;
+ }
+
+ cmp_bitmap |= BIT_64(i);
+ VERBOSE("SVE P_%u mismatch\n", i);
+ }
+
+ return cmp_bitmap;
+}
+
+/*
+ * Compare FFR register passed in 's1' (old values) with 's2' (new values).
+ * This routine works for Normal SVE or Streaming SVE mode.
+ *
+ * Returns:
+ * 0 : the FFR register in 's1' and 's2' is equal
+ * nonzero : the FFR register differs
+ */
+uint64_t sve_ffr_regs_compare(const sve_ffr_regs_t *s1, const sve_ffr_regs_t *s2)
+{
+ uint32_t ffr_size;
+ uint64_t cmp_bitmap = 0UL;
+ bool sve_hint;
+
+ /* Size of one FFR register 1/8 of Z register */
+ ffr_size = (uint32_t)sve_rdvl_1() / 8U;
+
+ /* Ignore sve_hint for Streaming SVE mode */
+ if (is_feat_sme_supported() && sme_smstat_sm()) {
+ sve_hint = false;
+ } else {
+ sve_hint = tftf_smc_get_sve_hint();
+ }
+
+ for (uint32_t i = 0U; i < SVE_NUM_FFR_REGS; i++) {
+ uint8_t *s1_ffr = (uint8_t *)s1 + (i * ffr_size);
+ uint8_t *s2_ffr = (uint8_t *)s2 + (i * ffr_size);
+
+ /*
+ * For FFR register the comparison is successful when
+ * 1. whole FFR register of 's1' and 's2' is equal or
+ * 2. sve_hint is set and the FFR register of 's2' is zero
+ */
+ if ((memcmp(s1_ffr, s2_ffr, ffr_size) == 0) ||
+ (sve_hint && (memcmp(s2_ffr, zero_mem, ffr_size) == 0))) {
+ continue;
+ }
+
+ cmp_bitmap |= BIT_64(i);
+ VERBOSE("SVE FFR_%u mismatch:\n", i);
+ }
+
+ return cmp_bitmap;
+}
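A sketch of how these routines are typically combined in a test, with the operation
whose SVE-state preservation is being checked left as a placeholder:

        static uint64_t sve_z_state_preserved(uint8_t vq)
        {
                static sve_z_regs_t before, after;

                /* Select the vector length; capped to SVE_VQ_ARCH_MAX internally. */
                sve_config_vq(vq);

                /* Program random contents into Z0-Z31. */
                sve_z_regs_write_rand(&before);

                /* ... run the operation expected to preserve SVE state ... */

                sve_z_regs_read(&after);

                /* Zero means all Z registers still match; else a mismatch bitmap. */
                return sve_z_regs_compare(&before, &after);
        }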
diff --git a/lib/extensions/sve/aarch64/sve_helpers.S b/lib/extensions/sve/aarch64/sve_helpers.S
new file mode 100644
index 000000000..128b35025
--- /dev/null
+++ b/lib/extensions/sve/aarch64/sve_helpers.S
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+.global sve_subtract_arrays_interleaved
+.global sve_subtract_arrays
+
+#if __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0)
+
+/*
+ * Based on example code from:
+ * Arm Compiler Scalable Vector Extension User Guide Version 6.12 [1].
+ *
+ * [1] https://developer.arm.com/documentation/100891/0612/getting-started-with-the-sve-compiler/compiling-c-and-c---code-for-sve-enabled-targets
+ */
+
+/*
+ * Subtracts arrays using SVE operations with interleaved callback.
+ * dst_array = src_array_1 - src_array_2
+ * Inputs:
+ * x0 - dst_array
+ * x1 - src_array_1
+ * x2 - src_array_2
+ * x3 - array size
+ * x4 - callback function pointer
+ * Returns:
+ * Callback function's return value
+ */
+func sve_subtract_arrays_interleaved
+.arch_extension sve
+ stp x29, x30, [sp, #-80]!
+ mov x29, sp
+ stp x19, x20, [sp, #16]
+ mov x19, x0
+ mov x20, x1
+ stp x21, x22, [sp, #32]
+ mov x21, x2
+ mov x22, x3
+ stp x23, x24, [sp, #48]
+ mov x23, x4
+ mov x24, x3
+ str x25, [sp, #64]
+ mov x25, 0
+
+ whilelo p0.s, xzr, x4
+.loop:
+ ld1w z0.s, p0/z, [x20, x25, lsl 2]
+ ld1w z1.s, p0/z, [x21, x25, lsl 2]
+
+ /* Invoke the world switch callback */
+ blr x23
+
+ /* Exit loop if callback returns non-zero */
+ cmp w0, #0x0
+ bne .exit_loop
+
+ sub z0.s, z0.s, z1.s
+ st1w z0.s, p0, [x19, x25, lsl 2]
+ incw x25
+
+ whilelo p0.s, x25, x24
+ bne .loop
+.exit_loop:
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldr x25, [sp, #64]
+ ldp x29, x30, [sp], #80
+ ret
+.arch_extension nosve
+endfunc sve_subtract_arrays_interleaved
+
+/*
+ * Subtracts arrays using SVE operations.
+ * dst_array = src_array_1 - src_array_2
+ * Inputs:
+ * x0 - dst_array
+ * x1 - src_array_1
+ * x2 - src_array_2
+ * x3 - array size
+ * Returns:
+ * none
+ */
+func sve_subtract_arrays
+.arch_extension sve
+ mov x4, x3
+ mov x5, 0
+ whilelo p0.s, xzr, x3
+.sub_loop:
+ ld1w z0.s, p0/z, [x1, x5, lsl 2]
+ ld1w z1.s, p0/z, [x2, x5, lsl 2]
+ sub z0.s, z0.s, z1.s
+ st1w z0.s, p0, [x0, x5, lsl 2]
+ incw x5
+ whilelo p0.s, x5, x4
+ bne .sub_loop
+ ret
+.arch_extension nosve
+endfunc sve_subtract_arrays
+
+#endif /* __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0) */
diff --git a/lib/heap/page_alloc.c b/lib/heap/page_alloc.c
new file mode 100644
index 000000000..c1b54c87a
--- /dev/null
+++ b/lib/heap/page_alloc.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <debug.h>
+#include <heap/page_alloc.h>
+#include <spinlock.h>
+#include <utils_def.h>
+
+#include <platform_def.h>
+
+static uint64_t memory_used;
+static uint64_t heap_base_addr;
+static u_register_t heap_addr;
+static uint64_t heap_size;
+static bool heap_initialised = HEAP_INIT_FAILED;
+static spinlock_t mem_lock;
+
+/*
+ * Initialize the memory heap space to be used
+ * @heap_base: heap base address
+ * @heap_len: heap size for use
+ */
+int page_pool_init(uint64_t heap_base, uint64_t heap_len)
+{
+ const uint64_t plat_max_addr = (uint64_t)DRAM_BASE + (uint64_t)DRAM_SIZE;
+ uint64_t max_addr = heap_base + heap_len;
+
+ if (heap_len == 0ULL) {
+ ERROR("heap_len must be non-zero value\n");
+ heap_initialised = HEAP_INVALID_LEN;
+ } else if (max_addr >= plat_max_addr) {
+ ERROR("heap_base + heap[0x%llx] must not exceed platform"
+ "max address[0x%llx]\n", max_addr, plat_max_addr);
+
+ heap_initialised = HEAP_OUT_OF_RANGE;
+ } else {
+ heap_base_addr = heap_base;
+ memory_used = heap_base;
+ heap_size = heap_len;
+ heap_initialised = HEAP_INIT_SUCCESS;
+ }
+ return heap_initialised;
+}
+
+/*
+ * Return a pointer to the allocated pages
+ * @bytes_size: size of the allocation, in bytes
+ */
+void *page_alloc(u_register_t bytes_size)
+{
+ if (heap_initialised != HEAP_INIT_SUCCESS) {
+		ERROR("heap needs to be initialised first\n");
+ return HEAP_NULL_PTR;
+ }
+ if (bytes_size == 0UL) {
+		ERROR("bytes_size must be a non-zero value\n");
+ return HEAP_NULL_PTR;
+ }
+
+ spin_lock(&mem_lock);
+
+ if ((memory_used + bytes_size) >= (heap_base_addr + heap_size)) {
+		ERROR("Reached max KB allowed [%llu]\n", (heap_size/1024U));
+ goto unlock_failed;
+ }
+ /* set pointer to current used heap memory cursor */
+ heap_addr = memory_used;
+ /* move used memory cursor by bytes_size */
+ memory_used += bytes_size;
+ spin_unlock(&mem_lock);
+
+ return (void *)heap_addr;
+
+unlock_failed:/* failed allocation */
+ spin_unlock(&mem_lock);
+ return HEAP_NULL_PTR;
+}
+
+/*
+ * Reset heap memory usage cursor to heap base address
+ */
+void page_pool_reset(void)
+{
+ /*
+	 * No race condition here: only the lead CPU running the TFTF test
+	 * case can reset the memory allocation.
+ */
+ memory_used = heap_base_addr;
+}
+
+void page_free(u_register_t address)
+{
+ /* No memory free is needed in current TFTF test scenarios */
+}
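
A minimal usage sketch of the allocator above (not part of the patch); heap_smoke_test and its parameters are illustrative names, and the header path follows the include used in this file.

#include <stddef.h>
#include <stdint.h>

#include <heap/page_alloc.h>

/* Illustrative helper: initialise the pool, grab one 4KB page, then rewind. */
static int heap_smoke_test(uint64_t heap_base, uint64_t heap_len)
{
	void *buf;

	if (page_pool_init(heap_base, heap_len) != HEAP_INIT_SUCCESS) {
		return -1;
	}

	buf = page_alloc(0x1000U);
	if (buf == NULL) {
		return -1;
	}

	/* ... use buf ... */

	page_pool_reset();	/* rewind the cursor for the next test */
	return 0;
}
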
diff --git a/lib/libc/snprintf.c b/lib/libc/snprintf.c
index 29c50df4d..6ad284f01 100644
--- a/lib/libc/snprintf.c
+++ b/lib/libc/snprintf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,7 +9,6 @@
#include <stdlib.h>
#include <common/debug.h>
-#include <plat/common/platform.h>
#define get_num_va_args(_args, _lcount) \
(((_lcount) > 1) ? va_arg(_args, long long int) : \
diff --git a/lib/power_management/suspend/aarch64/asm_tftf_suspend.S b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
index 09950b5c0..e715e49a7 100644
--- a/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
+++ b/lib/power_management/suspend/aarch64/asm_tftf_suspend.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -72,7 +72,7 @@ func __tftf_save_arch_context
ret
2: mrs x1, mair_el2
- mrs x2, hcr_el2
+ mrs x2, cptr_el2
mrs x3, ttbr0_el2
mrs x4, tcr_el2
mrs x5, vbar_el2
@@ -80,6 +80,8 @@ func __tftf_save_arch_context
stp x1, x2, [x0, #SUSPEND_CTX_MAIR_OFFSET]
stp x3, x4, [x0, #SUSPEND_CTX_TTBR0_OFFSET]
stp x5, x6, [x0, #SUSPEND_CTX_VBAR_OFFSET]
+ mrs x1, hcr_el2
+ str x1, [x0, #SUSPEND_CTX_HCR_OFFSET]
ret
endfunc __tftf_save_arch_context
@@ -114,10 +116,13 @@ func __tftf_cpu_resume_ep
ldp x3, x4, [x0, #SUSPEND_CTX_TTBR0_OFFSET]
ldp x5, x6, [x0, #SUSPEND_CTX_VBAR_OFFSET]
msr mair_el2, x1
- msr hcr_el2, x2
+ msr cptr_el2, x2
msr ttbr0_el2, x3
msr tcr_el2, x4
msr vbar_el2, x5
+ ldr x1, [x0, #SUSPEND_CTX_HCR_OFFSET]
+ msr hcr_el2, x1
+
/*
* TLB invalidations need to be completed before enabling MMU
*/
diff --git a/lib/power_management/suspend/suspend_private.h b/lib/power_management/suspend/suspend_private.h
index bc2f3a600..2ccf7a377 100644
--- a/lib/power_management/suspend/suspend_private.h
+++ b/lib/power_management/suspend/suspend_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,20 +9,22 @@
/*
* Number of system registers we need to save/restore across a CPU suspend:
- * MAIR, CPACR_EL1/HCR_EL2, TTBR0, TCR, VBAR, SCTLR,
+ * EL1: MAIR, CPACR, TTBR0, TCR, VBAR, SCTLR
+ * EL2: MAIR, CPTR, TTBR0, TCR, VBAR, SCTLR, HCR
* APIAKeyLo_EL1 and APIAKeyHi_EL1 (if enabled).
*/
#if ENABLE_PAUTH
-#define NR_CTX_REGS 8
+#define NR_CTX_REGS 10
#else
-#define NR_CTX_REGS 6
+#define NR_CTX_REGS 8
#endif
/* Offsets of the fields in the context structure. Needed by asm code. */
#define SUSPEND_CTX_MAIR_OFFSET 0
#define SUSPEND_CTX_TTBR0_OFFSET 16
#define SUSPEND_CTX_VBAR_OFFSET 32
-#define SUSPEND_CTX_APIAKEY_OFFSET 48
+#define SUSPEND_CTX_HCR_OFFSET 48
+#define SUSPEND_CTX_APIAKEY_OFFSET 64
#define SUSPEND_CTX_SP_OFFSET (8 * NR_CTX_REGS)
#define SUSPEND_CTX_SAVE_SYSTEM_CTX_OFFSET (SUSPEND_CTX_SP_OFFSET + 8)
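
For reference, a C view of the context layout implied by the offsets above (a sketch, not part of the patch; it assumes AArch64 running at EL2 with ENABLE_PAUTH=1, and the slot at offset 56 is padding inferred from the 64-byte APIAKEY offset).

#include <stdint.h>

struct suspend_ctx_layout_sketch {
	uint64_t mair;		/* offset  0: MAIR_EL1 / MAIR_EL2        */
	uint64_t cptr;		/* offset  8: CPACR_EL1 / CPTR_EL2       */
	uint64_t ttbr0;		/* offset 16: TTBR0_ELx                  */
	uint64_t tcr;		/* offset 24: TCR_ELx                    */
	uint64_t vbar;		/* offset 32: VBAR_ELx                   */
	uint64_t sctlr;		/* offset 40: SCTLR_ELx                  */
	uint64_t hcr;		/* offset 48: HCR_EL2 (EL2 only)         */
	uint64_t unused;	/* offset 56: padding implied by offsets */
	uint64_t apiakey[2];	/* offset 64: APIAKeyLo/Hi_EL1           */
};				/* 8 * NR_CTX_REGS == 80 bytes           */
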
diff --git a/lib/psci/psci.c b/lib/psci/psci.c
index 857b01e5f..aace0921e 100644
--- a/lib/psci/psci.c
+++ b/lib/psci/psci.c
@@ -81,6 +81,17 @@ int32_t tftf_psci_cpu_off(void)
return ret_vals.ret0;
}
+int32_t tftf_psci_set_suspend_mode(uint32_t mode)
+{
+ smc_args args = {
+ SMC_PSCI_SET_SUSPEND_MODE,
+ mode
+ };
+ smc_ret_values ret_vals;
+
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
u_register_t tftf_psci_stat_residency(u_register_t target_cpu,
uint32_t power_state)
@@ -185,6 +196,8 @@ int tftf_psci_make_composite_state_id(uint32_t affinity_level,
ret = PSCI_E_INVALID_PARAMS;
}
}
+ *state_id |= psci_make_local_state_id(PLAT_MAX_PWR_LEVEL + 1,
+ affinity_level);
return ret;
}
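
The new tftf_psci_set_suspend_mode() wrapper simply forwards the requested mode to the PSCI SET_SUSPEND_MODE call. A hedged usage sketch follows (not part of the patch): per the PSCI specification, mode 0 selects platform-coordinated and mode 1 selects OS-initiated suspend; the macro and helper names below are placeholders.

#include <stdint.h>

#define SUSPEND_MODE_PC		0U	/* platform-coordinated (default) */
#define SUSPEND_MODE_OSI	1U	/* OS-initiated */

/* Illustrative helper: try to switch to OSI mode before an OSI suspend test. */
static int try_enable_osi_mode(void)
{
	int32_t ret = tftf_psci_set_suspend_mode(SUSPEND_MODE_OSI);

	if (ret != PSCI_E_SUCCESS) {
		/* e.g. not supported when OSI mode is not implemented */
		return -1;
	}

	return 0;
}
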
diff --git a/lib/smc/aarch64/smc.c b/lib/smc/aarch64/smc.c
index 6667ee7be..9912e7277 100644
--- a/lib/smc/aarch64/smc.c
+++ b/lib/smc/aarch64/smc.c
@@ -1,24 +1,99 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch.h>
+#include <arch_features.h>
+#include <lib/extensions/sve.h>
#include <stdint.h>
+#include <smccc.h>
#include <tftf.h>
+#include <utils_def.h>
-smc_ret_values asm_tftf_smc64(uint32_t fid,
- u_register_t arg1,
- u_register_t arg2,
- u_register_t arg3,
- u_register_t arg4,
- u_register_t arg5,
- u_register_t arg6,
- u_register_t arg7);
+static void sve_enable(void)
+{
+ if (IS_IN_EL2()) {
+ write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TZ_BIT);
+ } else {
+ write_cpacr_el1(read_cpacr_el1() |
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));
+ }
+
+ isb();
+}
+
+static void sve_disable(void)
+{
+ if (IS_IN_EL2()) {
+ write_cptr_el2(read_cptr_el2() | CPTR_EL2_TZ_BIT);
+ } else {
+ unsigned long val = read_cpacr_el1();
+
+ val &= ~CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE);
+ val |= CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_ALL);
+ write_cpacr_el1(val);
+ }
+
+ isb();
+}
+
+static bool is_sve_enabled(void)
+{
+ if (IS_IN_EL2()) {
+ return ((read_cptr_el2() & CPTR_EL2_TZ_BIT) == 0UL);
+ } else {
+ return ((read_cpacr_el1() &
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE)) ==
+ CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE));
+ }
+}
+
+/*
+ * Use the trap control register SVE flags to represent the SVE hint bit. On an
+ * SVE-capable CPU, sve_hint_flag = true denotes that SVE is not in use (SVE is
+ * disabled), and sve_hint_flag = false denotes that SVE is in use (SVE is
+ * enabled).
+ */
+void tftf_smc_set_sve_hint(bool sve_hint_flag)
+{
+ if (!is_armv8_2_sve_present()) {
+ return;
+ }
+
+ if (sve_hint_flag) {
+ sve_disable();
+ } else {
+ sve_enable();
+ }
+}
+
+/*
+ * On an SVE-capable CPU, a return value of 'true' denotes that SVE is not in
+ * use and 'false' denotes that SVE is in use.
+ *
+ * If the CPU does not support SVE, always return 'false'.
+ */
+bool tftf_smc_get_sve_hint(void)
+{
+ if (is_armv8_2_sve_present()) {
+ return is_sve_enabled() ? false : true;
+ }
+
+ return false;
+}
smc_ret_values tftf_smc(const smc_args *args)
{
- return asm_tftf_smc64(args->fid,
+ uint32_t fid = args->fid;
+
+ if (tftf_smc_get_sve_hint()) {
+ fid |= MASK(FUNCID_SVE_HINT);
+ } else {
+ fid &= ~MASK(FUNCID_SVE_HINT);
+ }
+
+ return asm_tftf_smc64(fid,
args->arg1,
args->arg2,
args->arg3,
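
A sketch of how a test might exercise the new hint plumbing (not part of the patch; SMC_PSCI_VERSION is assumed to be the TFTF function-ID macro for PSCI_VERSION and is used here only as a harmless SMC).

static void smc_with_sve_hint_sketch(void)
{
	smc_args args = { SMC_PSCI_VERSION };
	smc_ret_values ret;

	tftf_smc_set_sve_hint(true);	/* declare that SVE is not in use */
	ret = tftf_smc(&args);		/* FUNCID_SVE_HINT is OR-ed into the FID */
	tftf_smc_set_sve_hint(false);	/* restore: SVE (if present) back in use */

	(void)ret;
}
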
diff --git a/lib/sprt/aarch64/sprt_client_helpers.S b/lib/sprt/aarch64/sprt_client_helpers.S
deleted file mode 100644
index 46068540c..000000000
--- a/lib/sprt/aarch64/sprt_client_helpers.S
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-
- .globl sprt_client_svc
-
-func sprt_client_svc
- /*
- * Save the address of the svc_args structure on the stack.
- *
- * Although x0 contains an 8-byte value, we are allocating 16 bytes
- * on the stack to respect the 16-byte stack-alignment.
- */
- str x0, [sp, #-16]!
-
- /* Load the SVC arguments values into the appropriate registers. */
- ldp x6, x7, [x0, #48]
- ldp x4, x5, [x0, #32]
- ldp x2, x3, [x0, #16]
- ldp x0, x1, [x0, #0]
-
- svc #0
-
- /*
- * Pop the svc_args structure address from the stack into a caller-saved
- * register.
- */
- ldr x9, [sp], #16
-
- /*
- * The return values are stored in x0-x3, put them in the svc_args
- * return structure.
- */
- stp x0, x1, [x9, #0]
- stp x2, x3, [x9, #16]
- ret
-endfunc sprt_client_svc
diff --git a/lib/sprt/sprt_client.c b/lib/sprt/sprt_client.c
deleted file mode 100644
index 41f530762..000000000
--- a/lib/sprt/sprt_client.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <errno.h>
-#include <sprt_svc.h>
-#include <stddef.h>
-
-#include "sprt_client.h"
-#include "sprt_client_private.h"
-#include "sprt_common.h"
-#include "sprt_queue.h"
-
-uint32_t sprt_version(void)
-{
- struct svc_args args;
-
- args.arg0 = SPRT_VERSION;
-
- return sprt_client_svc(&args);
-}
-
-void sprt_wait_for_messages(void)
-{
- struct svc_args args;
-
- args.arg0 = SPRT_YIELD_AARCH64;
-
- sprt_client_svc(&args);
-}
-
-/*
- * Variable that points to the memory buffer that contains the queues used by
- * this Secure Partition.
- */
-static void *queue_messages;
-
-void sprt_initialize_queues(void *buffer_base)
-{
- queue_messages = buffer_base;
-}
-
-int sprt_get_next_message(struct sprt_queue_entry_message *message,
- int queue_num)
-{
- struct sprt_queue *q = queue_messages;
-
- while (queue_num-- > 0) {
- uintptr_t next_addr = (uintptr_t)q + sizeof(struct sprt_queue) +
- q->entry_num * q->entry_size;
- q = (struct sprt_queue *) next_addr;
- }
-
- return sprt_queue_pop(q, message);
-}
-
-void sprt_message_end(struct sprt_queue_entry_message *message,
- u_register_t arg0, u_register_t arg1, u_register_t arg2,
- u_register_t arg3)
-{
- struct svc_args args;
-
- if (message->type == SPRT_MSG_TYPE_SERVICE_REQUEST) {
- args.arg0 = SPRT_PUT_RESPONSE_AARCH64;
- args.arg1 = message->token;
- }
-
- args.arg2 = arg0;
- args.arg3 = arg1;
- args.arg4 = arg2;
- args.arg5 = arg3;
- args.arg6 = ((uint32_t)message->service_handle << 16U)
- | message->client_id;
- args.arg7 = message->session_id;
-
- sprt_client_svc(&args);
-}
diff --git a/lib/sprt/sprt_client.mk b/lib/sprt/sprt_client.mk
deleted file mode 100644
index 8d2243044..000000000
--- a/lib/sprt/sprt_client.mk
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-SPRT_LIB_SOURCES := $(addprefix lib/sprt/, \
- ${ARCH}/sprt_client_helpers.S \
- sprt_client.c \
- sprt_queue.c)
-
-SPRT_LIB_INCLUDES := -Iinclude/lib/sprt/
diff --git a/lib/sprt/sprt_client_private.h b/lib/sprt/sprt_client_private.h
deleted file mode 100644
index 57d8dc5a2..000000000
--- a/lib/sprt/sprt_client_private.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPRT_CLIENT_PRIVATE_H
-#define SPRT_CLIENT_PRIVATE_H
-
-#include <stdint.h>
-
-struct svc_args {
- u_register_t arg0;
- u_register_t arg1;
- u_register_t arg2;
- u_register_t arg3;
- u_register_t arg4;
- u_register_t arg5;
- u_register_t arg6;
- u_register_t arg7;
-};
-
-/*
- * Invoke an SVC call.
- *
- * The arguments to pass through the SVC call must be stored in the svc_args
- * structure. The return values of the SVC call will be stored in the same
- * structure (overriding the input arguments).
- *
- * Returns the first return value. It is equivalent to args.arg0 but is also
- * provided as the return value for convenience.
- */
-u_register_t sprt_client_svc(struct svc_args *args);
-
-#endif /* SPRT_CLIENT_PRIVATE_H */
diff --git a/lib/sprt/sprt_queue.c b/lib/sprt/sprt_queue.c
deleted file mode 100644
index 2bd4139ea..000000000
--- a/lib/sprt/sprt_queue.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-
-#include "sprt_queue.h"
-
-void sprt_queue_init(void *queue_base, uint32_t entry_num, uint32_t entry_size)
-{
- assert(queue_base != NULL);
- assert(entry_size > 0U);
- assert(entry_num > 0U);
-
- struct sprt_queue *queue = (struct sprt_queue *)queue_base;
-
- queue->entry_num = entry_num;
- queue->entry_size = entry_size;
- queue->idx_write = 0U;
- queue->idx_read = 0U;
-
- memset(queue->data, 0, entry_num * entry_size);
-}
-
-int sprt_queue_is_empty(void *queue_base)
-{
- assert(queue_base != NULL);
-
- struct sprt_queue *queue = (struct sprt_queue *)queue_base;
-
- return (queue->idx_write == queue->idx_read);
-}
-
-int sprt_queue_is_full(void *queue_base)
-{
- assert(queue_base != NULL);
-
- struct sprt_queue *queue = (struct sprt_queue *)queue_base;
-
- uint32_t idx_next_write = (queue->idx_write + 1) % queue->entry_num;
-
- return (idx_next_write == queue->idx_read);
-}
-
-int sprt_queue_push(void *queue_base, const void *entry)
-{
- assert(entry != NULL);
- assert(queue_base != NULL);
-
- if (sprt_queue_is_full(queue_base) != 0) {
- return -ENOMEM;
- }
-
- struct sprt_queue *queue = (struct sprt_queue *)queue_base;
-
- uint8_t *dst_entry = &queue->data[queue->entry_size * queue->idx_write];
-
- memcpy(dst_entry, entry, queue->entry_size);
-
- /*
- * Make sure that the message data is visible before increasing the
- * counter of available messages.
- */
- __asm__ volatile("dmb st" ::: "memory");
-
- queue->idx_write = (queue->idx_write + 1) % queue->entry_num;
-
- __asm__ volatile("dmb st" ::: "memory");
-
- return 0;
-}
-
-int sprt_queue_pop(void *queue_base, void *entry)
-{
- assert(entry != NULL);
- assert(queue_base != NULL);
-
- if (sprt_queue_is_empty(queue_base) != 0) {
- return -ENOENT;
- }
-
- struct sprt_queue *queue = (struct sprt_queue *)queue_base;
-
- uint8_t *src_entry = &queue->data[queue->entry_size * queue->idx_read];
-
- memcpy(entry, src_entry, queue->entry_size);
-
- /*
- * Make sure that the message data is visible before increasing the
- * counter of read messages.
- */
- __asm__ volatile("dmb st" ::: "memory");
-
- queue->idx_read = (queue->idx_read + 1) % queue->entry_num;
-
- __asm__ volatile("dmb st" ::: "memory");
-
- return 0;
-}
diff --git a/lib/sprt/sprt_queue.h b/lib/sprt/sprt_queue.h
deleted file mode 100644
index 4ea1bc231..000000000
--- a/lib/sprt/sprt_queue.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPRT_QUEUE_H
-#define SPRT_QUEUE_H
-
-#include <stdint.h>
-
-/* Struct that defines a queue. Not to be used directly. */
-struct __attribute__((__packed__)) sprt_queue {
- uint32_t entry_num; /* Number of entries */
- uint32_t entry_size; /* Size of an entry */
- uint32_t idx_write; /* Index of first empty entry */
- uint32_t idx_read; /* Index of first entry to read */
- uint8_t data[0]; /* Start of data */
-};
-
-#define SPRT_QUEUE_HEADER_SIZE (sizeof(struct sprt_queue))
-
-/*
- * Initializes a memory region to be used as a queue of the given number of
- * entries with the specified size.
- */
-void sprt_queue_init(void *queue_base, uint32_t entry_num, uint32_t entry_size);
-
-/* Returns 1 if the queue is empty, 0 otherwise */
-int sprt_queue_is_empty(void *queue_base);
-
-/* Returns 1 if the queue is full, 0 otherwise */
-int sprt_queue_is_full(void *queue_base);
-
-/*
- * Pushes a new entry intro the queue. Returns 0 on success, -ENOMEM if the
- * queue is full.
- */
-int sprt_queue_push(void *queue_base, const void *entry);
-
-/*
- * Pops an entry from the queue. Returns 0 on success, -ENOENT if the queue is
- * empty.
- */
-int sprt_queue_pop(void *queue_base, void *entry);
-
-#endif /* SPRT_QUEUE_H */
diff --git a/lib/transfer_list/transfer_list.c b/lib/transfer_list/transfer_list.c
new file mode 100644
index 000000000..c83b0b368
--- /dev/null
+++ b/lib/transfer_list/transfer_list.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <transfer_list.h>
+
+struct transfer_list_entry *transfer_list_find(struct transfer_list_header *tl,
+ uint16_t tag_id)
+{
+ struct transfer_list_entry *te = (void *)tl + tl->hdr_size;
+
+ while (te->tag_id != tag_id) {
+ te += round_up(te->hdr_size + te->data_size, tl->alignment);
+ }
+
+ return te;
+}
+
+void *transfer_list_entry_data(struct transfer_list_entry *entry)
+{
+ return (uint8_t *)entry + entry->hdr_size;
+}
+
+/*******************************************************************************
+ * Verify the header of a transfer list.
+ * Compliant with section 2.4.1 of the Firmware Handoff specification (v0.9).
+ * Returns the transfer list operation status code.
+ ******************************************************************************/
+enum transfer_list_ops
+transfer_list_check_header(const struct transfer_list_header *tl)
+{
+ uint8_t byte_sum = 0U;
+ uint8_t *b = (uint8_t *)tl;
+
+ if (tl == NULL) {
+ return TL_OPS_NON;
+ }
+
+ if (tl->signature != TRANSFER_LIST_SIGNATURE ||
+ tl->size > tl->max_size) {
+ return TL_OPS_NON;
+ }
+
+ for (size_t i = 0; i < tl->size; i++) {
+ byte_sum += b[i];
+ }
+
+ if (byte_sum - tl->checksum == tl->checksum) {
+ return TL_OPS_NON;
+ }
+
+ return TL_OPS_ALL;
+}
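
A minimal consumer sketch for the helpers above (not part of the patch); TAG_ID_EXAMPLE is a placeholder tag value, not one defined by the Firmware Handoff specification, and transfer_list_find() as written assumes the tag is present in the list.

#include <stddef.h>

#include <transfer_list.h>

#define TAG_ID_EXAMPLE	0x1U	/* placeholder tag for illustration */

static void *get_example_entry_data(struct transfer_list_header *tl)
{
	struct transfer_list_entry *te;

	if (transfer_list_check_header(tl) == TL_OPS_NON) {
		return NULL;	/* no usable transfer list */
	}

	te = transfer_list_find(tl, TAG_ID_EXAMPLE);

	return transfer_list_entry_data(te);
}
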
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 037dbf81c..044ea0ab4 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -20,22 +20,23 @@
*/
bool xlat_arch_is_granule_size_supported(size_t size)
{
- u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();
+ u_register_t tgranx;
if (size == PAGE_SIZE_4KB) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
- ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;
+ tgranx = get_id_aa64mmfr0_el0_tgran4();
+ /* MSB of TGRAN4 field will be '1' for unsupported feature */
+ return ((tgranx >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED) &&
+ (tgranx < 8ULL));
} else if (size == PAGE_SIZE_16KB) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
- ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;
+ tgranx = get_id_aa64mmfr0_el0_tgran16();
+ return (tgranx >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED);
} else if (size == PAGE_SIZE_64KB) {
- return ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
- ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
- ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;
+ tgranx = get_id_aa64mmfr0_el0_tgran64();
+ /* MSB of TGRAN64 field will be '1' for unsupported feature */
+ return ((tgranx >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED) &&
+ (tgranx < 8ULL));
} else {
- return 0;
+ return false;
}
}
@@ -91,8 +92,7 @@ static const unsigned int pa_range_bits_arr[] = {
unsigned long long xlat_arch_get_max_supported_pa(void)
{
- u_register_t pa_range = read_id_aa64mmfr0_el1() &
- ID_AA64MMFR0_EL1_PARANGE_MASK;
+ u_register_t pa_range = get_pa_range();
/* All other values are reserved */
assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));
diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk
index 0ac15051b..e12f8e208 100644
--- a/make_helpers/build_macros.mk
+++ b/make_helpers/build_macros.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2015-2023, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -18,6 +18,22 @@ define assert_boolean
$(and $(patsubst 0,,$(value $(1))),$(patsubst 1,,$(value $(1))),$(error $(1) must be boolean))
endef
+0-9 := 0 1 2 3 4 5 6 7 8 9
+
+# Function to verify that a given option $(1) contains a numeric value
+define assert_numeric
+$(if $($(1)),,$(error $(1) must not be empty))
+$(eval __numeric := $($(1)))
+$(foreach d,$(0-9),$(eval __numeric := $(subst $(d),,$(__numeric))))
+$(if $(__numeric),$(error $(1) must be numeric))
+endef
+
+# Convenience function for verifying options have numeric values
+# $(eval $(call assert_numerics,FOO BOO)) will assert FOO and BOO contain numeric values
+define assert_numerics
+ $(foreach num,$1,$(eval $(call assert_numeric,$(num))))
+endef
+
# CREATE_SEQ is a recursive function to create sequence of numbers from 1 to
# $(2) and assign the sequence to $(1)
define CREATE_SEQ
@@ -28,3 +44,16 @@ $(if $(word $(2), $($(1))),\
$(call CREATE_SEQ,$(1),$(2))\
)
endef
+
+# Convenience function to check for a given linker option. A call to
+# $(call ld_option, --no-XYZ) will return --no-XYZ if supported by the linker
+define ld_option
+ $(shell if $(LD) $(1) -v >/dev/null 2>&1; then echo $(1); fi )
+endef
+
+# Convenience function to check for a given compiler option. A call to
+# $(call cc_option, --no-XYZ) will return --no-XYZ if supported by the compiler
+define cc_option
+ $(shell if $(CC) $(1) -c -x c /dev/null -o /dev/null >/dev/null 2>&1; then echo $(1); fi )
+endef
+
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 2e18d67f7..d26ec06d3 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -45,3 +45,13 @@ USE_NVM := 0
# Build verbosity
V := 0
+
+# Select the branch protection features to use
+BRANCH_PROTECTION := 0
+
+# Build RME stack
+ENABLE_REALM_PAYLOAD_TESTS := 0
+
+# Use the Firmware Handoff framework to receive configurations from the
+# preceding bootloader.
+TRANSFER_LIST := 0
diff --git a/plat/arm/common/arm_fwu_io_storage.c b/plat/arm/common/arm_fwu_io_storage.c
index 5af3e0600..2f44a19e2 100644
--- a/plat/arm/common/arm_fwu_io_storage.c
+++ b/plat/arm/common/arm_fwu_io_storage.c
@@ -157,6 +157,9 @@ int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
assert(image_id < ARRAY_SIZE(policies));
policy = &policies[image_id];
+ if (policy->check == NULL) {
+ return result;
+ }
result = policy->check(policy->image_spec);
if (result == IO_SUCCESS) {
*image_spec = policy->image_spec;
diff --git a/plat/arm/corstone1000/corstone1000_def.h b/plat/arm/corstone1000/corstone1000_def.h
new file mode 100644
index 000000000..3e6f036ac
--- /dev/null
+++ b/plat/arm/corstone1000/corstone1000_def.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __CORSTONE1000_DEF_H__
+#define __CORSTONE1000_DEF_H__
+
+#include <common_def.h>
+#include <platform_def.h>
+
+/*******************************************************************************
+ * HOST memory map related constants
+ ******************************************************************************/
+
+#define HOST_PERIPHERAL_BASE (0x1A000000)
+#define HOST_PERIPHERAL_SIZE (608 * SZ_1M)
+
+#define ON_CHIP_MEM_BASE (0x02000000)
+#define ON_CHIP_MEM_SIZE (SZ_4M)
+
+#define DRAM_BASE (0x80000000)
+#define DRAM_SIZE (SZ_2G)
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+/* GIC memory map */
+#define GICD_BASE 0x1C010000
+#define GICC_BASE 0x1C02F000
+/* The GIC redistributor does not exist on the GIC-400, but we still need to
+ * provide GICR_BASE as the GIC driver requires it.
+ */
+#define GICR_BASE 0x0
+
+/*******************************************************************************
+ * PL011 related constants
+ ******************************************************************************/
+#define PL011_UART0_BASE 0x1A510000
+#define PL011_UART1_BASE 0x1A520000
+
+#define PL011_UART0_CLK_IN_HZ 50000000
+#define PL011_UART1_CLK_IN_HZ 50000000
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ PL011_UART0_CLK_IN_HZ
+
+#endif /* __CORSTONE1000_DEF_H__ */
diff --git a/plat/arm/corstone1000/corstone1000_mem_prot.c b/plat/arm/corstone1000/corstone1000_mem_prot.c
new file mode 100644
index 000000000..8e4a27027
--- /dev/null
+++ b/plat/arm/corstone1000/corstone1000_mem_prot.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+#include <utils_def.h>
+#include <xlat_tables_v2.h>
+
+#define NS_IMAGE_OFFSET TFTF_BASE
+#define NS_IMAGE_LIMIT (NS_IMAGE_OFFSET + (32 << TWO_MB_SHIFT))
+
+static const mem_region_t corstone1000_ram_ranges[] = {
+ {NS_IMAGE_LIMIT, 128 << TWO_MB_SHIFT},
+};
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(corstone1000_ram_ranges);
+ return corstone1000_ram_ranges;
+}
diff --git a/plat/arm/corstone1000/corstone1000_pwr_state.c b/plat/arm/corstone1000/corstone1000_pwr_state.c
new file mode 100644
index 000000000..b2f0bc82a
--- /dev/null
+++ b/plat/arm/corstone1000/corstone1000_pwr_state.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * State IDs for local power states on Corstone1000.
+ */
+#define CORSTONE1000_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define CORSTONE1000_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define CORSTONE1000_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ CORSTONE1000_RUN_DEPTH = 0,
+ CORSTONE1000_RETENTION_DEPTH,
+ CORSTONE1000_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {CORSTONE1000_RETENTION_DEPTH, CORSTONE1000_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {CORSTONE1000_OFF_DEPTH, CORSTONE1000_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/*
+ * The state property array with details of idle state possible
+ * for the cluster
+ */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {CORSTONE1000_OFF_DEPTH, CORSTONE1000_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/*
+ * The state property array with details of idle state possible
+ * for the system. Currently Corstone1000 does not support CPU SUSPEND
+ * at system power level.
+ */
+static const plat_state_prop_t system_state_prop[] = {
+ {CORSTONE1000_OFF_DEPTH, CORSTONE1000_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
diff --git a/plat/arm/corstone1000/corstone1000_topology.c b/plat/arm/corstone1000/corstone1000_topology.c
new file mode 100644
index 000000000..37055ec4a
--- /dev/null
+++ b/plat/arm/corstone1000/corstone1000_topology.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <arch.h>
+#include <tftf_lib.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} corstone1000_cores[] = {
+ /* SMP with single core, cluster_id is same as cpu_id */
+ { 0, 0 },
+};
+
+/*
+ * The Corstone1000 power domain tree descriptor. Corstone1000 implements a
+ * system power domain at level 2. The first entry in the power domain
+ * descriptor specifies the number of power domains at the highest power level.
+ * For Corstone1000 this is 1, i.e. the number of system power domains.
+ */
+static const unsigned char corstone1000_power_domain_tree_desc[] = {
+ /* Number of root nodes */
+ PLATFORM_SYSTEM_COUNT,
+ /* Number of children of root node */
+ PLATFORM_CLUSTER_COUNT,
+	/* Number of children for the cluster */
+ PLATFORM_CLUSTER0_CORE_COUNT
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return corstone1000_power_domain_tree_desc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return make_mpid(corstone1000_cores[core_pos].cluster_id,
+ corstone1000_cores[core_pos].cpu_id);
+}
diff --git a/plat/arm/corstone1000/include/platform_def.h b/plat/arm/corstone1000/include/platform_def.h
new file mode 100644
index 000000000..a0d6f7b3b
--- /dev/null
+++ b/plat/arm/corstone1000/include/platform_def.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <utils_def.h>
+
+#include "../corstone1000_def.h"
+
+/*******************************************************************************
+ * Platform definitions used by common code
+ ******************************************************************************/
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#ifdef __aarch64__
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+#else
+#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
+#define PLATFORM_LINKER_ARCH arm
+#endif
+
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE 0x80000000
+
+/******************************************************************************
+ * Memory mapped Generic timer interfaces
+ *******************************************************************************/
+/* REFCLK CNTControl, Generic Timer. Secure Access only. */
+#define SYS_CNT_CONTROL_BASE 0x1A200000
+/* REFCLK CNTRead, Generic Timer. */
+#define SYS_CNT_READ_BASE 0x1A210000
+/* AP_REFCLK CNTBase1, Generic Timer. */
+#define SYS_CNT_BASE1 0x1A240000
+
+/*******************************************************************************
+ * Base address and size of external NVM flash
+ ******************************************************************************/
+#define FLASH_BASE 0x08000000
+
+#define NOR_FLASH_BLOCK_SIZE 0x40000
+#define NOR_FLASH_BLOCKS_COUNT 255
+#define FLASH_SIZE (NOR_FLASH_BLOCK_SIZE * NOR_FLASH_BLOCKS_COUNT)
+
+/* watchdog timer */
+#define IRQ_TWDOG_INTID 56
+
+/* Size of cacheable stacks */
+#if IMAGE_NS_BL1U
+#define PLATFORM_STACK_SIZE 0x1000
+#elif IMAGE_NS_BL2U
+#define PLATFORM_STACK_SIZE 0x1000
+#elif IMAGE_TFTF
+#define PLATFORM_STACK_SIZE 0x1400
+#endif
+
+/* Size of coherent stacks for debug and release builds */
+#if DEBUG
+#define PCPU_DV_MEM_STACK_SIZE 0x600
+#else
+#define PCPU_DV_MEM_STACK_SIZE 0x500
+#endif
+
+#define PLATFORM_SYSTEM_COUNT 1
+#define PLATFORM_CLUSTER_COUNT 1
+#define PLATFORM_CLUSTER0_CORE_COUNT 1 /* Cortex-A35 Cluster */
+#define PLATFORM_CORE_COUNT 1
+#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
+ PLATFORM_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+#define CORSTONE1000_MAX_CPUS_PER_CLUSTER 4
+#define CORSTONE1000_MAX_PE_PER_CPU 1
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+#if IMAGE_NS_BL1U
+#define MAX_IO_DEVICES 2
+#define MAX_IO_HANDLES 2
+#else
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
+#endif
+
+/*
+ * Use a DRAM offset (TFTF_NVM_OFFSET) to store TFTF data.
+ *
+ * Please note that this won't be suitable for all test scenarios and
+ * for this reason some tests will be disabled in this configuration.
+ */
+#define TFTF_NVM_OFFSET 0x40000
+#define TFTF_NVM_SIZE (128 * SZ_1M) /* 128 MB */
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#ifdef __aarch64__
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 34)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 34)
+#else
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32)
+#endif
+
+#if IMAGE_TFTF
+/* For testing xlat tables lib v2 */
+#define MAX_XLAT_TABLES 20
+#define MAX_MMAP_REGIONS 50
+#else
+#define MAX_XLAT_TABLES 5
+#define MAX_MMAP_REGIONS 16
+#endif
+
+/*******************************************************************************
+ * Used to align variables on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupt IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+#define PLAT_MAX_SPI_OFFSET_ID 220
+
+/* The IRQ generated by Ethernet controller */
+#define IRQ_ETHERNET 116
+
+#define IRQ_CNTPSIRQ1 92
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER 26
+/* Per-CPU Non-Secure Timer Interrupt ID */
+#define IRQ_PCPU_NS_TIMER 30
+
+/*
+ * Times (in ms) used by the test code for completion of different events.
+ * The suspend entry time for debug builds is high due to the time taken by
+ * the VERBOSE/INFO prints. The value considers the worst-case scenario where
+ * all CPUs are continuously entering and exiting suspend.
+ */
+#if DEBUG
+#define PLAT_SUSPEND_ENTRY_TIME 0x100
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 0x200
+#else
+#define PLAT_SUSPEND_ENTRY_TIME 10
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 20
+#endif
+
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/arm/corstone1000/plat_helpers.S b/plat/arm/corstone1000/plat_helpers.S
new file mode 100644
index 000000000..a87f36f1a
--- /dev/null
+++ b/plat/arm/corstone1000/plat_helpers.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <drivers/arm/pl011.h>
+#include "corstone1000_def.h"
+
+ .globl platform_get_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on Corstone1000.
+ *
+ * (ClusterId * CORSTONE1000_MAX_CPUS_PER_CLUSTER * CORSTONE1000_MAX_PE_PER_CPU) +
+ * (CPUId * CORSTONE1000_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * CORSTONE1000_MAX_CPUS_PER_CLUSTER + CPUId) * CORSTONE1000_MAX_PE_PER_CPU)
+ * + ThreadId
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+ * look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov x3, #CORSTONE1000_MAX_CPUS_PER_CLUSTER
+ madd x1, x2, x3, x1
+ mov x3, #CORSTONE1000_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+endfunc platform_get_core_pos
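
The same linear mapping, expressed in C for clarity (a sketch, not part of the patch):

/* Core position as described in the comment above (illustration only). */
static inline unsigned int corstone1000_core_pos_sketch(unsigned int cluster_id,
							 unsigned int cpu_id,
							 unsigned int thread_id)
{
	return ((cluster_id * CORSTONE1000_MAX_CPUS_PER_CLUSTER) + cpu_id) *
		CORSTONE1000_MAX_PE_PER_CPU + thread_id;
}
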
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_ARM_UART_BASE
+ mov_imm x1, PLAT_ARM_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+	 * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/arm/corstone1000/plat_setup.c b/plat/arm/corstone1000/plat_setup.c
new file mode 100644
index 000000000..342885d76
--- /dev/null
+++ b/plat/arm/corstone1000/plat_setup.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/arm_gic.h>
+#include <mmio.h>
+#include <platform.h>
+#include <xlat_tables_v2.h>
+#include <plat_arm.h>
+
+/*
+ * Table of regions to map using the MMU.
+ */
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(HOST_PERIPHERAL_BASE, HOST_PERIPHERAL_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(ON_CHIP_MEM_BASE, ON_CHIP_MEM_SIZE, MT_MEMORY | MT_RW | MT_SECURE),
+#if USE_NVM
+ MAP_REGION_FLAT(FLASH_BASE, FLASH_SIZE, MT_DEVICE | MT_RW | MT_NS),
+#endif
+ MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE, MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
+
+void tftf_platform_setup(void)
+{
+ arm_platform_setup();
+}
+
+void plat_arm_gic_init(void)
+{
+ arm_gic_init(GICC_BASE, GICD_BASE, GICR_BASE);
+}
diff --git a/plat/arm/corstone1000/platform.mk b/plat/arm/corstone1000/platform.mk
new file mode 100644
index 000000000..a5a011d51
--- /dev/null
+++ b/plat/arm/corstone1000/platform.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/arm/corstone1000/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/system_timer.c \
+ plat/arm/corstone1000/plat_helpers.S \
+ plat/arm/corstone1000/corstone1000_pwr_state.c \
+ plat/arm/corstone1000/corstone1000_topology.c \
+ plat/arm/corstone1000/corstone1000_mem_prot.c \
+ plat/arm/corstone1000/plat_setup.c
+
+PLAT_SUPPORTS_NS_RESET := 1
+
+# Process PLAT_SUPPORTS_NS_RESET flag
+$(eval $(call assert_boolean,PLAT_SUPPORTS_NS_RESET))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_SUPPORTS_NS_RESET))
+
+FIRMWARE_UPDATE := 0
+PLAT_TESTS_SKIP_LIST := plat/arm/corstone1000/tests_to_skip.txt
+
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/corstone1000/tests_to_skip.txt b/plat/arm/corstone1000/tests_to_skip.txt
new file mode 100644
index 000000000..a9282496d
--- /dev/null
+++ b/plat/arm/corstone1000/tests_to_skip.txt
@@ -0,0 +1,16 @@
+Realm payload tests
+Realm payload boot
+Realm payload multi CPU request
+Realm payload Delegate and Undelegate
+Multi CPU Realm payload Delegate and Undelegate
+Testing delegation fails
+Realm testing with SPM tests
+PSCI System Suspend Validation
+PSCI STAT/Stats test cases after system suspend
+IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
+PSCI SYSTEM SUSPEND stress tests
+Timer framework Validation/Verify the timer interrupt generation
+CPU Hotplug/CPU hotplug
+PSCI CPU Suspend
+PSCI CPU Suspend in OSI mode
+PSCI STAT/for valid composite state CPU suspend
diff --git a/plat/arm/fvp/fvp_tests_to_skip.txt b/plat/arm/fvp/fvp_tests_to_skip.txt
index d52947dc6..1769caccd 100644
--- a/plat/arm/fvp/fvp_tests_to_skip.txt
+++ b/plat/arm/fvp/fvp_tests_to_skip.txt
@@ -1,6 +1,8 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Disable Group0 secure interrupt tests as there is no source of EL3 interrupt
+FF-A Group0 interrupts
diff --git a/plat/arm/fvp/include/platform_def.h b/plat/arm/fvp/include/platform_def.h
index 3afc9b8bb..60c33e9c6 100644
--- a/plat/arm/fvp/include/platform_def.h
+++ b/plat/arm/fvp/include/platform_def.h
@@ -37,6 +37,10 @@
/* Base address of non-trusted watchdog (SP805) */
#define SP805_WDOG_BASE 0x1C0F0000
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE 0x2A490000
+#define IRQ_TWDOG_INTID 56
+
/*******************************************************************************
* Base address and size of external NVM flash
******************************************************************************/
@@ -53,6 +57,43 @@
#define NOR_FLASH_BLOCKS_COUNT 255
#define FLASH_SIZE (NOR_FLASH_BLOCK_SIZE * NOR_FLASH_BLOCKS_COUNT)
+/**********************************
+ * Addresses to test invalid access
+ **********************************/
+/*
+ * The top 16MB (or 64MB if RME is enabled) of DRAM1 is configured as
+ * follows for FVP platform:
+ * - L1 GPT DRAM: Reserved for L1 GPT if RME is enabled
+ * - REALM DRAM: Reserved for Realm world if RME is enabled
+ * - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use
+ *
+ * RME enabled(64MB) RME not enabled(16MB)
+ * 0xFC00_0000 -------------------- ------------------- 0xFF00_0000
+ * | | | |
+ * 0xFD000000 | AP Secure (~28MB)| | AP TZC (~14MB) |
+ * -------------------- ------------------- 0xFFE0_0000
+ * | | | |
+ * | REALM (32MB) | | EL3 TZC (2MB) |
+ * -------------------- ------------------- 0xFFFF_FFFF
+ * | |
+ * 0xFFE0_0000 | EL3 Root (3MB) |
+ * --------------------
+ * | L1 GPT (1MB) |
+ * | |
+ * 0xFFFF_FFFF --------------------
+ *
+ *
+ */
+/* For both RME & non-RME case top 2MB will be EL3 memory */
+#define EL3_MEMORY_ACCESS_ADDR U(0xFFE00000)
+#define SECURE_MEMORY_ACCESS_ADDR U(0xFD000000)
+
+/*******************************************************************************
+ * Base address and size for the FIP.
+ ******************************************************************************/
+#define PLAT_ARM_FIP_BASE (FLASH_BASE)
+#define PLAT_ARM_FIP_SIZE (0x100000)
+
/*******************************************************************************
* Base address and size for the FIP that contains FWU images.
******************************************************************************/
@@ -103,7 +144,7 @@
* calculated using the current NS_BL1U RW debug size plus a little space
* for growth.
******************************************************************************/
-#define NS_BL1U_RW_SIZE (0x7000)
+#define NS_BL1U_RW_SIZE (0x9000)
#define NS_BL1U_RW_BASE (NSRAM_BASE)
#define NS_BL1U_RW_LIMIT (NS_BL1U_RW_BASE + NS_BL1U_RW_SIZE)
@@ -119,7 +160,7 @@
* Base address and limit for NS_BL2U image.
******************************************************************************/
#define NS_BL2U_BASE DRAM_BASE
-#define NS_BL2U_LIMIT (NS_BL2U_BASE + 0x4D000)
+#define NS_BL2U_LIMIT (NS_BL2U_BASE + 0x4E000)
/******************************************************************************
* Memory mapped Generic timer interfaces
@@ -205,8 +246,8 @@
* Platform specific page table and MMU setup constants
******************************************************************************/
#ifdef __aarch64__
-#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 34)
-#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 34)
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << PA_SIZE)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << PA_SIZE)
#else
#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32)
#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32)
@@ -218,11 +259,11 @@
#define MAX_MMAP_REGIONS 50
#else
#if IMAGE_CACTUS
-#define MAX_XLAT_TABLES 6
+#define MAX_XLAT_TABLES 12
#else
#define MAX_XLAT_TABLES 5
#endif
-#define MAX_MMAP_REGIONS 16
+#define MAX_MMAP_REGIONS 20
#endif
/*******************************************************************************
diff --git a/plat/arm/fvp/platform.mk b/plat/arm/fvp/platform.mk
index 42779c720..99183ec18 100644
--- a/plat/arm/fvp/platform.mk
+++ b/plat/arm/fvp/platform.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -56,9 +56,17 @@ ifneq ($(FVP_MAX_CPUS_PER_CLUSTER),$(filter $(FVP_MAX_CPUS_PER_CLUSTER),$(CPU)))
endif
# Pass FVP topology definitions to the build system
-$(eval $(call add_define,TFTF_DEFINES,FVP_CLUSTER_COUNT))
-$(eval $(call add_define,TFTF_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
-$(eval $(call add_define,TFTF_DEFINES,FVP_MAX_PE_PER_CPU))
+$(eval $(call add_define,CACTUS_DEFINES,FVP_CLUSTER_COUNT))
+$(eval $(call add_define,CACTUS_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
+$(eval $(call add_define,CACTUS_DEFINES,FVP_MAX_PE_PER_CPU))
+
+$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_CLUSTER_COUNT))
+$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
+$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_MAX_PE_PER_CPU))
+
+$(eval $(call add_define,IVY_DEFINES,FVP_CLUSTER_COUNT))
+$(eval $(call add_define,IVY_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
+$(eval $(call add_define,IVY_DEFINES,FVP_MAX_PE_PER_CPU))
$(eval $(call add_define,NS_BL1U_DEFINES,FVP_CLUSTER_COUNT))
$(eval $(call add_define,NS_BL1U_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
@@ -68,6 +76,20 @@ $(eval $(call add_define,NS_BL2U_DEFINES,FVP_CLUSTER_COUNT))
$(eval $(call add_define,NS_BL2U_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
$(eval $(call add_define,NS_BL2U_DEFINES,FVP_MAX_PE_PER_CPU))
+$(eval $(call add_define,TFTF_DEFINES,FVP_CLUSTER_COUNT))
+$(eval $(call add_define,TFTF_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
+$(eval $(call add_define,TFTF_DEFINES,FVP_MAX_PE_PER_CPU))
+
+# Default PA size for FVP platform
+PA_SIZE := 34
+
+$(eval $(call add_define,CACTUS_DEFINES,PA_SIZE))
+$(eval $(call add_define,IVY_DEFINES,PA_SIZE))
+$(eval $(call add_define,NS_BL1U_DEFINES,PA_SIZE))
+$(eval $(call add_define,NS_BL2U_DEFINES,PA_SIZE))
+$(eval $(call add_define,TFTF_DEFINES,PA_SIZE))
+$(eval $(call add_define,REALM_DEFINES,PA_SIZE))
+
PLAT_INCLUDES += -Iplat/arm/fvp/include/
PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
@@ -83,6 +105,7 @@ PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
plat/arm/fvp/plat_setup.c
CACTUS_SOURCES += plat/arm/fvp/${ARCH}/plat_helpers.S
+IVY_SOURCES += plat/arm/fvp/${ARCH}/plat_helpers.S
# Firmware update is implemented on FVP.
FIRMWARE_UPDATE := 1
diff --git a/plat/arm/juno/include/platform_def.h b/plat/arm/juno/include/platform_def.h
index 0f9bb778f..2de11fd36 100644
--- a/plat/arm/juno/include/platform_def.h
+++ b/plat/arm/juno/include/platform_def.h
@@ -42,6 +42,10 @@
/* Base address of non-trusted watchdog (SP805) */
#define SP805_WDOG_BASE 0x1C0F0000
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE 0x2A4A0000
+#define IRQ_TWDOG_INTID 86
+
/* Memory mapped Generic timer interfaces */
#define SYS_CNT_BASE1 0x2a830000
diff --git a/plat/arm/juno/juno32_tests_to_skip.txt b/plat/arm/juno/juno32_tests_to_skip.txt
index 078e36351..83d342e6a 100644
--- a/plat/arm/juno/juno32_tests_to_skip.txt
+++ b/plat/arm/juno/juno32_tests_to_skip.txt
@@ -4,6 +4,9 @@
# SPDX-License-Identifier: BSD-3-Clause
#
+# OS-initiated mode is not supported on AArch32 Juno.
+PSCI CPU Suspend in OSI mode
+
# System suspend is not supported on AArch32 Juno.
PSCI System Suspend Validation
PSCI STAT/Stats test cases after system suspend
diff --git a/plat/arm/juno/juno64_tests_to_skip.txt b/plat/arm/juno/juno64_tests_to_skip.txt
index 53c7e7fc5..636b0df15 100644
--- a/plat/arm/juno/juno64_tests_to_skip.txt
+++ b/plat/arm/juno/juno64_tests_to_skip.txt
@@ -6,3 +6,6 @@
# The multicore spurious interrupt test is known to cause problems on Juno
IRQ support in TSP/Multicore spurious interrupt test
+
+# OS-initiated mode is not supported on Juno
+PSCI CPU Suspend in OSI mode
diff --git a/plat/arm/n1sdp/aarch64/plat_helpers.S b/plat/arm/n1sdp/aarch64/plat_helpers.S
new file mode 100644
index 000000000..d0c6347a4
--- /dev/null
+++ b/plat/arm/n1sdp/aarch64/plat_helpers.S
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <drivers/arm/pl011.h>
+#include <platform_def.h>
+
+ .globl platform_get_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position on N1SDP platforms.
+ *
+ * (ClusterId * N1SDP_MAX_CPUS_PER_CLUSTER * N1SDP_MAX_PE_PER_CPU) +
+ * (CPUId * N1SDP_MAX_PE_PER_CPU) +
+ * ThreadId
+ *
+ * which can be simplified as:
+ *
+ * ((ClusterId * N1SDP_MAX_CPUS_PER_CLUSTER + CPUId) * N1SDP_MAX_PE_PER_CPU)
+ * + ThreadId
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+ * look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* Extract individual affinity fields from MPIDR */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* Compute linear position */
+ mov x3, #N1SDP_MAX_CPUS_PER_CLUSTER
+ madd x1, x2, x3, x1
+ mov x3, #N1SDP_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+endfunc platform_get_core_pos
+
+/* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_ARM_UART_BASE
+ mov_imm x1, PLAT_ARM_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+/* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+/* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, PLAT_ARM_UART_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/arm/n1sdp/include/platform_def.h b/plat/arm/n1sdp/include/platform_def.h
new file mode 100644
index 000000000..71409fc0f
--- /dev/null
+++ b/plat/arm/n1sdp/include/platform_def.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <arch.h>
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+/* Platform binary types for linking */
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define N1SDP_CLUSTER_COUNT 2
+#define N1SDP_MAX_CPUS_PER_CLUSTER 2
+#define N1SDP_MAX_PE_PER_CPU 1
+
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE 0xE0000000
+
+#define N1SDP_DRAM1_BASE 0x80000000
+#define N1SDP_DRAM1_SIZE 0x80000000
+#define DRAM_BASE N1SDP_DRAM1_BASE
+
+/*
+ * TF-A reserves DRAM space 0xFD000000 - 0xFEFFFFFF for Trusted DRAM
+ * TF-A reserves DRAM space 0xFF000000 - 0xFFFFFFFF for TZC
+ */
+#define ARM_TZC_DRAM1_SIZE 0x00200000 /* 2MB */
+#define ARM_TRUSTED_DRAM1_SIZE 0x0E000000 /* 16MB */
+
+#define DRAM_SIZE (N1SDP_DRAM1_SIZE - \
+ ARM_TRUSTED_DRAM1_SIZE - \
+ ARM_TZC_DRAM1_SIZE)
+
+/* REFCLK CNTControl, Generic Timer. Secure Access only. */
+#define SYS_CNT_CONTROL_BASE 0x2a430000
+/* REFCLK CNTRead, Generic Timer. */
+#define SYS_CNT_READ_BASE 0x2a800000
+/* AP_REFCLK CNTBase1, Generic Timer. */
+#define SYS_CNT_BASE1 0x2A830000
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE 0x1C0F0000
+
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE 0x2A480000
+#define IRQ_TWDOG_INTID 86
+
+/* Base address and size of external NVM flash */
+#define FLASH_BASE 0x08000000
+
+#define NOR_FLASH_BLOCK_SIZE 0x40000 /* 256KB */
+#define FLASH_SIZE 0x4000000 /* 64MB */
+
+/*
+ * If you want to use DRAM for non-volatile memory then the first 128MB
+ * can be used. However for tests that involve power resets this is not
+ * suitable since the state will be lost.
+ */
+#define TFTF_NVM_OFFSET 0x0
+#define TFTF_NVM_SIZE 0x8000000 /* 128 MB */
+
+/* Sub-system Peripherals */
+#define N1SDP_DEVICE0_BASE 0x08000000
+#define N1SDP_DEVICE0_SIZE 0x48000000
+
+/* N1SDP remote chip at 4 TB offset */
+#define PLAT_ARM_REMOTE_CHIP_OFFSET (ULL(1) << 42)
+
+/* Following covers remote n1sdp */
+#define N1SDP_DEVICE1_BASE (N1SDP_DEVICE0_BASE + PLAT_ARM_REMOTE_CHIP_OFFSET)
+#define N1SDP_DEVICE1_SIZE N1SDP_DEVICE0_SIZE
+
+/* GIC-600 & interrupt handling related constants */
+#define N1SDP_GICD_BASE 0x30000000
+#define N1SDP_GICR_BASE 0x300C0000
+#define N1SDP_GICC_BASE 0x2C000000
+
+/* SoC's PL011 UART0 related constants */
+#define PL011_UART0_BASE 0x2A400000
+#define PL011_UART0_CLK_IN_HZ 50000000
+
+/*
+ * SoC's PL011 UART1 related constants (duplicated from UART0 since AP UART1
+ * isn't accessible on N1SDP)
+ */
+#define PL011_UART1_BASE 0x2A400000
+#define PL011_UART1_CLK_IN_HZ 50000000
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ PL011_UART0_CLK_IN_HZ
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE 0x1400
+
+/* Size of coherent stacks */
+#define PCPU_DV_MEM_STACK_SIZE 0x600
+
+#define PLATFORM_CORE_COUNT (N1SDP_CLUSTER_COUNT * N1SDP_MAX_CPUS_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (N1SDP_CLUSTER_COUNT + PLATFORM_CORE_COUNT)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
+
+#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+/* I/O Storage NOR flash device */
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+/* Platform specific page table and MMU setup constants */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 36)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 36)
+
+#if IMAGE_CACTUS
+#define MAX_XLAT_TABLES 6
+#else
+#define MAX_XLAT_TABLES 5
+#endif
+#define MAX_MMAP_REGIONS 16
+
+/*******************************************************************************
+ * Used to align variables on the biggest cache line size in the platform.
+ * This is known only to the platform as it might have a combination of
+ * integrated and external caches.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/* Non-Secure Software Generated Interrupt IDs */
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_7 7
+
+/*
+ * The AP UART1 interrupt is considered the maximum SPI.
+ * MAX_SPI_ID = MIN_SPI_ID + PLAT_MAX_SPI_OFFSET_ID = 96
+ */
+#define PLAT_MAX_SPI_OFFSET_ID 64
+
+/* AP_REFCLK Generic Timer, Non-secure. */
+#define IRQ_CNTPSIRQ1 92
+
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER 26
+
+/* Times (in ms) used by test code for completion of different events */
+#define PLAT_SUSPEND_ENTRY_TIME 0x100
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 0x200
+
+#endif /* PLATFORM_DEF_H */
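Given the two-cluster, two-cores-per-cluster layout described by n1sdp_topology.c later in this patch, the core-count macros above work out to PLATFORM_CORE_COUNT = 2 * 2 = 4 and PLATFORM_NUM_AFFS = 2 + 4 = 6 (one affinity instance per cluster plus one per core).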
diff --git a/plat/arm/n1sdp/n1sdp_mem_prot.c b/plat/arm/n1sdp/n1sdp_mem_prot.c
new file mode 100644
index 000000000..1ee602d35
--- /dev/null
+++ b/plat/arm/n1sdp/n1sdp_mem_prot.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2020, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+#define N1SDP_DRAM1_NS_START (TFTF_BASE + 0x4000000)
+#define N1SDP_DRAM1_NS_SIZE 0x10000000
+
+static const mem_region_t n1sdp_ram_ranges[] = {
+ { N1SDP_DRAM1_NS_START, N1SDP_DRAM1_NS_SIZE }
+};
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(n1sdp_ram_ranges);
+ return n1sdp_ram_ranges;
+}
diff --git a/plat/arm/n1sdp/n1sdp_pwr_state.c b/plat/arm/n1sdp/n1sdp_pwr_state.c
new file mode 100644
index 000000000..d78f2eee0
--- /dev/null
+++ b/plat/arm/n1sdp/n1sdp_pwr_state.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+#include <psci.h>
+
+/* State IDs for local power states on N1SDP platform. */
+#define N1SDP_PS_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define N1SDP_PS_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define N1SDP_PS_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+
+/* Suspend depth definitions for each power state */
+#define N1SDP_PS_RUN_DEPTH 0
+#define N1SDP_PS_RETENTION_DEPTH 1
+#define N1SDP_PS_OFF_DEPTH 2
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {N1SDP_PS_RETENTION_DEPTH, N1SDP_PS_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {N1SDP_PS_OFF_DEPTH, N1SDP_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0}
+};
+
+/* The state property array with details of idle state possible for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {N1SDP_PS_OFF_DEPTH, N1SDP_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0}
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ default:
+ return NULL;
+ }
+}
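A rough illustration of how the per-level state IDs above feed the composite state-ID field of a PSCI power state, using the 4-bit PLAT_LOCAL_PSTATE_WIDTH from platform_def.h. The helper name and packing below are a hypothetical sketch, not part of this port:

/* Hypothetical sketch: pack per-level local state IDs into a composite
 * state-ID field, PLAT_LOCAL_PSTATE_WIDTH (4) bits per power level. */
static unsigned int n1sdp_compose_state_id(unsigned int cpu_state_id,
					   unsigned int cluster_state_id)
{
	return cpu_state_id | (cluster_state_id << PLAT_LOCAL_PSTATE_WIDTH);
}

/* Example: core and cluster both OFF gives (2 << 4) | 2 == 0x22. */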
diff --git a/plat/arm/n1sdp/n1sdp_topology.c b/plat/arm/n1sdp/n1sdp_topology.c
new file mode 100644
index 000000000..40750ced1
--- /dev/null
+++ b/plat/arm/n1sdp/n1sdp_topology.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <plat_topology.h>
+#include <tftf_lib.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} n1sdp_cores[] = {
+ /* N1SDP has 2 clusters with 2 cores each */
+ { 0, 0 },
+ { 0, 1 },
+ { 1, 0 },
+ { 1, 1 },
+};
+
+/*
+ * The power domain tree descriptor. The cluster power domains are
+ * arranged so that when the PSCI generic code creates the power domain tree,
+ * the indices of the CPU power domain nodes it allocates match the linear
+ * indices returned by plat_core_pos_by_mpidr().
+ */
+const unsigned char n1sdp_pd_tree_desc[] = {
+ /* Number of root nodes */
+ N1SDP_CLUSTER_COUNT,
+ /* Number of children for the 1st node */
+ N1SDP_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 2nd node */
+ N1SDP_MAX_CPUS_PER_CLUSTER
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return n1sdp_pd_tree_desc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ uint64_t mpid;
+
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ mpid = (uint64_t)make_mpid(n1sdp_cores[core_pos].cluster_id,
+ n1sdp_cores[core_pos].cpu_id);
+
+ return mpid;
+}
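To make the comment above concrete: with the descriptor {2, 2, 2}, the PSCI generic code allocates CPU power domain nodes in the order cluster0/cpu0, cluster0/cpu1, cluster1/cpu0, cluster1/cpu1, so each index into n1sdp_cores[] equals the linear position derived from the corresponding MPIDR. A hypothetical self-check (not part of this patch) would be:

/* Hypothetical sanity check: each table index must equal the core position
 * computed back from the MPIDR it maps to. */
static void n1sdp_check_topology(void)
{
	for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++)
		assert(platform_get_core_pos(tftf_plat_get_mpidr(i)) == i);
}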
diff --git a/plat/arm/n1sdp/plat_setup.c b/plat/arm/n1sdp/plat_setup.c
new file mode 100644
index 000000000..1c04c264c
--- /dev/null
+++ b/plat/arm/n1sdp/plat_setup.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/arm/arm_gic.h>
+#include <xlat_tables_v2.h>
+
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(N1SDP_DEVICE0_BASE, N1SDP_DEVICE0_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ /*MAP_REGION_FLAT(N1SDP_DEVICE1_BASE, N1SDP_DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),*/
+ MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE,
+ MT_MEMORY | MT_RW | MT_NS),
+ {0}
+};
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
+
+void plat_arm_gic_init(void)
+{
+ arm_gic_init(N1SDP_GICC_BASE, N1SDP_GICD_BASE, N1SDP_GICR_BASE);
+}
diff --git a/plat/arm/n1sdp/platform.mk b/plat/arm/n1sdp/platform.mk
new file mode 100644
index 000000000..39c33ab89
--- /dev/null
+++ b/plat/arm/n1sdp/platform.mk
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+N1SDP_MAX_PE_PER_CPU := 1
+
+$(eval $(call add_define,TFTF_DEFINES,N1SDP_MAX_PE_PER_CPU))
+$(eval $(call add_define,NS_BL1U_DEFINES,N1SDP_MAX_PE_PER_CPU))
+$(eval $(call add_define,NS_BL2U_DEFINES,N1SDP_MAX_PE_PER_CPU))
+
+PLAT_INCLUDES += -Iplat/arm/n1sdp/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/sp805/sp805.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/system_timer.c \
+ plat/arm/n1sdp/${ARCH}/plat_helpers.S \
+ plat/arm/n1sdp/plat_setup.c \
+ plat/arm/n1sdp/n1sdp_mem_prot.c \
+ plat/arm/n1sdp/n1sdp_pwr_state.c \
+ plat/arm/n1sdp/n1sdp_topology.c
+
+CACTUS_SOURCES += plat/arm/n1sdp/${ARCH}/plat_helpers.S
+IVY_SOURCES += plat/arm/n1sdp/${ARCH}/plat_helpers.S
+
+PLAT_TESTS_SKIP_LIST := plat/arm/n1sdp/tests_to_skip.txt
+
+ifeq (${USE_NVM},1)
+$(error "USE_NVM is not supported on N1SDP platforms")
+endif
+
+$(warning "TFTF on N1SDP is still in development and there may be issues")
+
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/n1sdp/tests_to_skip.txt b/plat/arm/n1sdp/tests_to_skip.txt
new file mode 100644
index 000000000..b6e87bfad
--- /dev/null
+++ b/plat/arm/n1sdp/tests_to_skip.txt
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Disable SMMUv3 tests
+SMMUv3 tests
+
+# OS-initiated mode is not supported on N1SDP
+PSCI CPU Suspend in OSI mode
+
+# PSCI is enabled but not tested
+PSCI STAT/Stats test cases after system suspend
+PSCI System Suspend Validation
+
+# Disable FF-A Interrupt tests as TWDOG is not supported on N1SDP
+FF-A Interrupt
+
+# The following tests were disabled to make TFTF run successfully on N1SDP.
+#
+# There is an issue where issuing an SGI to a powered-off core does not wake it up.
+#
+# Most of these tests should work, but something is likely wrong with the
+# platform files in TFTF: the port was done purely to measure the performance
+# impact of the Spectre workarounds and has not been developed further since.
+
+Timer framework Validation/Target timer to a power down cpu
+Timer framework Validation/Test scenario where multiple CPUs call same timeout
+Timer framework Validation/Stress test the timer framework
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend
+PSCI STAT/for valid composite state CPU suspend
diff --git a/plat/arm/sgi/common/aarch64/plat_helpers.S b/plat/arm/neoverse_rd/common/arch/aarch64/plat_helpers.S
index 9fe2ba962..122d0b93b 100644
--- a/plat/arm/sgi/common/aarch64/plat_helpers.S
+++ b/plat/arm/neoverse_rd/common/arch/aarch64/plat_helpers.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,15 +16,15 @@
/*----------------------------------------------------------------------
* unsigned int platform_get_core_pos(unsigned long mpid)
*
- * Function to calculate the core position on sgi platforms.
+ * Function to calculate the core position on Neoverse RD platforms.
*
- * (ClusterId * SGI_MAX_CPUS_PER_CLUSTER * SGI_MAX_PE_PER_CPU) +
- * (CPUId * SGI_MAX_PE_PER_CPU) +
+ * (ClusterId * NRD_MAX_CPUS_PER_CLUSTER * NRD_MAX_PE_PER_CPU) +
+ * (CPUId * NRD_MAX_PE_PER_CPU) +
* ThreadId
*
* which can be simplified as:
*
- * ((ClusterId * SGI_MAX_CPUS_PER_CLUSTER + CPUId) * SGI_MAX_PE_PER_CPU)
+ * ((ClusterId * NRD_MAX_CPUS_PER_CLUSTER + CPUId) * NRD_MAX_PE_PER_CPU)
* + ThreadId
* ---------------------------------------------------------------------
*/
@@ -43,9 +43,9 @@ func platform_get_core_pos
ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
/* Compute linear position */
- mov x3, #SGI_MAX_CPUS_PER_CLUSTER
+ mov x3, #NRD_MAX_CPUS_PER_CLUSTER
madd x1, x2, x3, x1
- mov x3, #SGI_MAX_PE_PER_CPU
+ mov x3, #NRD_MAX_PE_PER_CPU
madd x0, x1, x3, x0
ret
endfunc platform_get_core_pos
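For reference, the renamed calculation above is equivalent to the following C sketch (illustrative only, using the NRD_* macros from the platform headers):

/* Illustrative C equivalent of platform_get_core_pos() after the rename. */
static inline unsigned int nrd_core_pos(unsigned int cluster_id,
					unsigned int cpu_id,
					unsigned int thread_id)
{
	return ((cluster_id * NRD_MAX_CPUS_PER_CLUSTER) + cpu_id) *
	       NRD_MAX_PE_PER_CPU + thread_id;
}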
diff --git a/plat/arm/sgi/common/include/sgi_base_platform_def.h b/plat/arm/neoverse_rd/common/include/nrd_base_platform_def.h
index 8e025ab91..6a7e48727 100644
--- a/plat/arm/sgi/common/include/sgi_base_platform_def.h
+++ b/plat/arm/neoverse_rd/common/include/nrd_base_platform_def.h
@@ -1,41 +1,58 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef SGI_BASE_PLATFORM_DEF_H
-#define SGI_BASE_PLATFORM_DEF_H
+#ifndef NRD_BASE_PLATFORM_DEF_H
+#define NRD_BASE_PLATFORM_DEF_H
+
+#include <lib/utils_def.h>
/* Platform binary types for linking */
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
-/*******************************************************************************
- * Run-time address of the TFTF image.
- * It has to match the location where the Trusted Firmware-A loads the BL33
- * image.
- ******************************************************************************/
-#define TFTF_BASE 0xE0000000
+/* Sub-system Peripherals */
+#define NRD_DEVICE0_BASE UL(0x2A000000)
+#define NRD_DEVICE0_SIZE UL(0x26000000)
-#define SGI_DRAM1_BASE 0x80000000
-#define SGI_DRAM1_SIZE 0x80000000
-#define DRAM_BASE SGI_DRAM1_BASE
+/* Peripherals and PCIe expansion area */
+#define NRD_DEVICE1_BASE UL(0x60000000)
+#define NRD_DEVICE1_SIZE UL(0x20000000)
-/* TF-A reserves DRAM space 0xFF000000- 0xFFFFFFFF for TZC */
-#define DRAM_SIZE (SGI_DRAM1_SIZE - 0x1000000)
+/* AP Non-Secure UART related constants */
+#define NRD_CSS_NSEC_UART_BASE UL(0x2A400000)
+#define NRD_CSS_NSEC_CLK_IN_HZ 7372800
+
+#define PLAT_ARM_UART_BASE NRD_CSS_NSEC_UART_BASE
+#define PLAT_ARM_UART_CLK_IN_HZ NRD_CSS_NSEC_CLK_IN_HZ
+
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE UL(0x2A480000)
/* Memory mapped Generic timer interfaces */
-#define SYS_CNT_BASE1 0x2A830000
+#define SYS_CNT_BASE1 UL(0x2A830000)
-/* Base address of non-trusted watchdog (SP805) */
-#define SP805_WDOG_BASE 0x1C0F0000
+/* DRAM base address and size */
+#define PLAT_ARM_DRAM1_BASE UL(0x80000000)
+#define PLAT_ARM_DRAM1_SIZE UL(0x80000000)
+#define DRAM_BASE PLAT_ARM_DRAM1_BASE
+
+/* TF-A reserves DRAM space 0xFF000000 - 0xFFFFFFFF for TZC */
+#define DRAM_SIZE (PLAT_ARM_DRAM1_SIZE - 0x1000000)
/* Base address and size of external NVM flash */
-#define FLASH_BASE 0x08000000
+#define FLASH_BASE UL(0x08000000)
+#define FLASH_SIZE UL(0x04000000) /* 64MB */
+#define NOR_FLASH_BLOCK_SIZE UL(0x40000) /* 256KB */
-#define NOR_FLASH_BLOCK_SIZE 0x40000 /* 256KB */
-#define FLASH_SIZE 0x4000000 /* 64MB */
+/*******************************************************************************
+ * Run-time address of the TFTF image.
+ * It has to match the location where the Trusted Firmware-A loads the BL33
+ * image.
+ ******************************************************************************/
+#define TFTF_BASE UL(0xE0000000)
/*
* If you want to use DRAM for non-volatile memory then the first 128MB
@@ -43,27 +60,7 @@
* suitable since the state will be lost.
*/
#define TFTF_NVM_OFFSET 0x0
-#define TFTF_NVM_SIZE 0x8000000 /* 128 MB */
-
-/* Sub-system Peripherals */
-#define SGI_DEVICE0_BASE 0x2A000000
-#define SGI_DEVICE0_SIZE 0x26000000
-
-/* Following covers Peripherals and PCIe expansion area */
-#define SGI_DEVICE1_BASE 0x60000000
-#define SGI_DEVICE1_SIZE 0x20000000
-
-/* GIC-600 & interrupt handling related constants */
-#define SGI_GICD_BASE 0x30000000
-#define SGI_GICR_BASE 0x300C0000
-#define SGI_GICC_BASE 0x2C000000
-
-/* SoC's PL011 UART0 related constants */
-#define SGI_PL011_UART0_BASE 0x7FF80000
-#define SGI_PL011_UART0_CLK_IN_HZ 7372800
-
-#define PLAT_ARM_UART_BASE SGI_PL011_UART0_BASE
-#define PLAT_ARM_UART_CLK_IN_HZ SGI_PL011_UART0_CLK_IN_HZ
+#define TFTF_NVM_SIZE UL(0x08000000) /* 128 MB */
/* Size of cacheable stacks */
#define PLATFORM_STACK_SIZE 0x1400
@@ -71,24 +68,19 @@
/* Size of coherent stacks */
#define PCPU_DV_MEM_STACK_SIZE 0x600
-#define PLATFORM_CORE_COUNT (SGI_CLUSTER_COUNT * SGI_MAX_CPUS_PER_CLUSTER)
-#define PLATFORM_NUM_AFFS (SGI_CLUSTER_COUNT + PLATFORM_CORE_COUNT)
+#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER_COUNT * \
+ NRD_MAX_CPUS_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (PLAT_ARM_CLUSTER_COUNT + PLATFORM_CORE_COUNT)
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
#define PLAT_MAX_PWR_STATES_PER_LVL 2
-/* I/O Storage NOR flash device */
-#define MAX_IO_DEVICES 1
-#define MAX_IO_HANDLES 1
-
/* Local state bit width for each level in the state-ID field of power state */
#define PLAT_LOCAL_PSTATE_WIDTH 4
/* Platform specific page table and MMU setup constants */
-#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 32)
-#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 32)
-#define MAX_XLAT_TABLES 5
+#define MAX_XLAT_TABLES 6
#define MAX_MMAP_REGIONS 16
/*******************************************************************************
@@ -96,24 +88,22 @@
* This is known only to the platform as it might have a combination of
* integrated and external caches.
******************************************************************************/
-#define CACHE_WRITEBACK_SHIFT 6
-#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/* Times (in ms) used by test code for completion of different events */
+#define PLAT_SUSPEND_ENTRY_TIME 15
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 30
+
+/* I/O Storage NOR flash device */
+#define MAX_IO_DEVICES 1
+#define MAX_IO_HANDLES 1
/* Non-Secure Software Generated Interupts IDs */
#define IRQ_NS_SGI_0 0
#define IRQ_NS_SGI_7 7
-/* AP UART1 interrupt is considered as the maximum SPI */
-#define PLAT_MAX_SPI_OFFSET_ID 64
-
-/* AP_REFCLK Generic Timer, Non-secure. */
-#define IRQ_CNTPSIRQ1 92
-
/* Per-CPU Hypervisor Timer Interrupt ID */
#define IRQ_PCPU_HP_TIMER 26
-/* Times(in ms) used by test code for completion of different events */
-#define PLAT_SUSPEND_ENTRY_TIME 15
-#define PLAT_SUSPEND_ENTRY_EXIT_TIME 30
-
-#endif /* SGI_BASE_PLATFORM_DEF_H */
+#endif /* NRD_BASE_PLATFORM_DEF_H */
diff --git a/plat/arm/neoverse_rd/common/include/nrd_soc_css_def.h b/plat/arm/neoverse_rd/common/include/nrd_soc_css_def.h
new file mode 100644
index 000000000..bffe1895e
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/include/nrd_soc_css_def.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NRD_SOC_CSS_DEF_H
+#define NRD_SOC_CSS_DEF_H
+
+/* Trusted watchdog (SP805) Interrupt ID */
+#define IRQ_TWDOG_INTID 86
+
+/* Maximum SPI */
+#define PLAT_MAX_SPI_OFFSET_ID 64
+
+/* AP_REFCLK Generic Timer, Non-secure. */
+#define IRQ_CNTPSIRQ1 92
+
+#endif /* NRD_SOC_CSS_DEF_H */
diff --git a/plat/arm/neoverse_rd/common/include/nrd_soc_css_def_v2.h b/plat/arm/neoverse_rd/common/include/nrd_soc_css_def_v2.h
new file mode 100644
index 000000000..5d4f5e670
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/include/nrd_soc_css_def_v2.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NRD_SOC_CSS_DEF_V2_H
+#define NRD_SOC_CSS_DEF_V2_H
+
+/* Trusted watchdog (SP805) Interrupt ID */
+#define IRQ_TWDOG_INTID 107
+
+/* Maximum SPI */
+#define PLAT_MAX_SPI_OFFSET_ID 256
+
+/* AP_REFCLK Generic Timer, Non-secure. */
+#define IRQ_CNTPSIRQ1 109
+
+#endif /* NRD_SOC_CSS_DEF_V2_H */
+
diff --git a/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def.h b/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def.h
new file mode 100644
index 000000000..d329688ff
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NRD_SOC_PLATFORM_H
+#define NRD_SOC_PLATFORM_H
+
+#include <nrd_base_platform_def.h>
+#include <nrd_soc_css_def.h>
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE UL(0x1C0F0000)
+
+#endif /* NRD_SOC_PLATFORM_H */
diff --git a/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def_v2.h b/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def_v2.h
new file mode 100644
index 000000000..7a23c514f
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/include/nrd_soc_platform_def_v2.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef NRD_SOC_PLATFORM_V2_H
+#define NRD_SOC_PLATFORM_V2_H
+
+#include <nrd_base_platform_def.h>
+#include <nrd_soc_css_def_v2.h>
+
+/* Base address of non-trusted watchdog (SP805) */
+#define SP805_WDOG_BASE UL(0x0C0F0000)
+
+#endif /* NRD_SOC_PLATFORM_V2_H */
diff --git a/plat/arm/neoverse_rd/common/nrd_common.mk b/plat/arm/neoverse_rd/common/nrd_common.mk
new file mode 100644
index 000000000..10ee08b3c
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/nrd_common.mk
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PLAT_INCLUDES := -Iplat/arm/neoverse_rd/common/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/sp805/sp805.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/system_timer.c \
+ plat/arm/neoverse_rd/common/arch/${ARCH}/plat_helpers.S \
+ plat/arm/neoverse_rd/common/plat_setup.c \
+ plat/arm/neoverse_rd/common/nrd_mem_prot.c \
+ plat/arm/neoverse_rd/common/nrd_pwr_state.c
+
+include plat/arm/common/arm_common.mk
+
+ifeq (${USE_NVM},1)
+$(error "USE_NVM is not supported on Neoverse RD platforms")
+endif
+
+# Pass NRD_PLATFORM_VARIANT flag to the build system
+$(eval $(call add_define,TFTF_DEFINES,NRD_PLATFORM_VARIANT))
diff --git a/plat/arm/neoverse_rd/common/nrd_mem_prot.c b/plat/arm/neoverse_rd/common/nrd_mem_prot.c
new file mode 100644
index 000000000..9ea25e8d6
--- /dev/null
+++ b/plat/arm/neoverse_rd/common/nrd_mem_prot.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+#define NRD_DRAM1_NS_START (TFTF_BASE + 0x4000000)
+#define NRD_DRAM1_NS_SIZE 0x10000000
+
+static const mem_region_t nrd_ram_ranges[] = {
+ { NRD_DRAM1_NS_START, NRD_DRAM1_NS_SIZE },
+};
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(nrd_ram_ranges);
+ return nrd_ram_ranges;
+}
diff --git a/plat/arm/sgi/common/sgi_pwr_state.c b/plat/arm/neoverse_rd/common/nrd_pwr_state.c
index 305d1f505..31f81dd66 100644
--- a/plat/arm/sgi/common/sgi_pwr_state.c
+++ b/plat/arm/neoverse_rd/common/nrd_pwr_state.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,27 +7,27 @@
#include <platform.h>
#include <psci.h>
-/* State IDs for local power states on SGI platforms. */
-#define SGI_PS_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
-#define SGI_PS_RETENTION_STATE_ID 1 /* Valid for only CPUs */
-#define SGI_PS_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+/* State IDs for local power states on Neoverse RD platforms. */
+#define NRD_PS_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define NRD_PS_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define NRD_PS_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
/* Suspend depth definitions for each power state */
-#define SGI_PS_RUN_DEPTH 0
-#define SGI_PS_RETENTION_DEPTH 1
-#define SGI_PS_OFF_DEPTH 2
+#define NRD_PS_RUN_DEPTH 0
+#define NRD_PS_RETENTION_DEPTH 1
+#define NRD_PS_OFF_DEPTH 2
/* The state property array with details of idle state possible for the core */
static const plat_state_prop_t core_state_prop[] = {
- {SGI_PS_RETENTION_DEPTH, SGI_PS_RETENTION_STATE_ID,
+ {NRD_PS_RETENTION_DEPTH, NRD_PS_RETENTION_STATE_ID,
PSTATE_TYPE_STANDBY},
- {SGI_PS_OFF_DEPTH, SGI_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {NRD_PS_OFF_DEPTH, NRD_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
{0},
};
/* The state property array with details of idle state possible for the cluster */
static const plat_state_prop_t cluster_state_prop[] = {
- {SGI_PS_OFF_DEPTH, SGI_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {NRD_PS_OFF_DEPTH, NRD_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
{0},
};
diff --git a/plat/arm/sgi/common/plat_setup.c b/plat/arm/neoverse_rd/common/plat_setup.c
index 4b15f1a7e..4734bf00e 100644
--- a/plat/arm/sgi/common/plat_setup.c
+++ b/plat/arm/neoverse_rd/common/plat_setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,9 +8,9 @@
#include <xlat_tables_v2.h>
static const mmap_region_t mmap[] = {
- MAP_REGION_FLAT(SGI_DEVICE0_BASE, SGI_DEVICE0_SIZE,
+ MAP_REGION_FLAT(NRD_DEVICE0_BASE, NRD_DEVICE0_SIZE,
MT_DEVICE | MT_RW | MT_NS),
- MAP_REGION_FLAT(SGI_DEVICE1_BASE, SGI_DEVICE1_SIZE,
+ MAP_REGION_FLAT(NRD_DEVICE1_BASE, NRD_DEVICE1_SIZE,
MT_DEVICE | MT_RW | MT_NS),
MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE,
MT_MEMORY | MT_RW | MT_NS),
@@ -24,5 +24,5 @@ const mmap_region_t *tftf_platform_get_mmap(void)
void plat_arm_gic_init(void)
{
- arm_gic_init(SGI_GICC_BASE, SGI_GICD_BASE, SGI_GICR_BASE);
+ arm_gic_init(PLAT_ARM_GICC_BASE, PLAT_ARM_GICD_BASE, PLAT_ARM_GICR_BASE);
}
diff --git a/plat/arm/neoverse_rd/platform/rdn1edge/include/platform_def.h b/plat/arm/neoverse_rd/platform/rdn1edge/include/platform_def.h
new file mode 100644
index 000000000..2a3c7ac60
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn1edge/include/platform_def.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2019-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include "nrd_soc_platform_def.h"
+
+#define PLAT_ARM_CLUSTER_COUNT U(2)
+#define NRD_MAX_CPUS_PER_CLUSTER U(4)
+#define NRD_MAX_PE_PER_CPU U(1)
+
+/* GIC related constants */
+#define PLAT_ARM_GICD_BASE UL(0x30000000)
+#define PLAT_ARM_GICC_BASE UL(0x2C000000)
+#define PLAT_ARM_GICR_BASE UL(0x300C0000)
+
+/* Platform specific page table and MMU setup constants */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/neoverse_rd/platform/rdn1edge/platform.mk b/plat/arm/neoverse_rd/platform/rdn1edge/platform.mk
new file mode 100644
index 000000000..9f92efd40
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn1edge/platform.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2019-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include plat/arm/neoverse_rd/common/nrd_common.mk
+
+PLAT_INCLUDES += -Iplat/arm/neoverse_rd/platform/rdn1edge/include/
+
+PLAT_SOURCES += plat/arm/neoverse_rd/platform/rdn1edge/topology.c
+
+PLAT_TESTS_SKIP_LIST := plat/arm/neoverse_rd/platform/rdn1edge/tests_to_skip.txt
+
+ifdef NRD_PLATFORM_VARIANT
+$(error "NRD_PLATFORM_VARIANT should not be set for RD-N1-Edge, \
+ currently set to ${NRD_PLATFORM_VARIANT}.")
+endif
diff --git a/plat/arm/rdinfra/rdn1edge/tests_to_skip.txt b/plat/arm/neoverse_rd/platform/rdn1edge/tests_to_skip.txt
index 7fda40b74..63418098d 100644
--- a/plat/arm/rdinfra/rdn1edge/tests_to_skip.txt
+++ b/plat/arm/neoverse_rd/platform/rdn1edge/tests_to_skip.txt
@@ -1,9 +1,12 @@
#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2024, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# OS-initiated mode is not supported on RD-N1-Edge
+PSCI CPU Suspend in OSI mode
+
# System suspend is not supported as there are no wakeup sources in RD-N1Edge FVP
PSCI STAT/Stats test cases after system suspend
PSCI System Suspend Validation
diff --git a/plat/arm/rdinfra/rdn1edge/topology.c b/plat/arm/neoverse_rd/platform/rdn1edge/topology.c
index 5521de455..cb79ba66b 100644
--- a/plat/arm/rdinfra/rdn1edge/topology.c
+++ b/plat/arm/neoverse_rd/platform/rdn1edge/topology.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -32,11 +32,11 @@ static const struct {
*/
const unsigned char plat_pd_tree_desc[] = {
/* Number of root nodes */
- SGI_CLUSTER_COUNT,
+ PLAT_ARM_CLUSTER_COUNT,
/* Number of children for the 1st node */
- SGI_MAX_CPUS_PER_CLUSTER,
+ NRD_MAX_CPUS_PER_CLUSTER,
/* Number of children for the 2nd node */
- SGI_MAX_CPUS_PER_CLUSTER
+ NRD_MAX_CPUS_PER_CLUSTER
};
const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
diff --git a/plat/arm/neoverse_rd/platform/rdn2/include/platform_def.h b/plat/arm/neoverse_rd/platform/rdn2/include/platform_def.h
new file mode 100644
index 000000000..177d91153
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn2/include/platform_def.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <nrd_soc_platform_def_v2.h>
+
+/*
+ * The RD-N2-Cfg1 platform is a variant of the RD-N2 platform with a
+ * reduced interconnect mesh size (3x3) and core count (8 cores).
+ *
+ * The NRD_PLATFORM_VARIANT flag is set to 1 for the RD-N2-Cfg1 platform.
+ */
+#if (NRD_PLATFORM_VARIANT == 1)
+#define PLAT_ARM_CLUSTER_COUNT U(8)
+#else
+#define PLAT_ARM_CLUSTER_COUNT U(16)
+#endif
+#define NRD_MAX_CPUS_PER_CLUSTER U(1)
+#define NRD_MAX_PE_PER_CPU U(1)
+
+/* GIC-600 & interrupt handling related constants */
+#define PLAT_ARM_GICD_BASE UL(0x30000000)
+#if (NRD_PLATFORM_VARIANT == 1)
+#define PLAT_ARM_GICR_BASE UL(0x30100000)
+#else
+#define PLAT_ARM_GICR_BASE UL(0x301C0000)
+#endif
+#define PLAT_ARM_GICC_BASE UL(0x2C000000)
+
+/* Platform specific page table and MMU setup constants */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 46)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 46)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/neoverse_rd/platform/rdn2/platform.mk b/plat/arm/neoverse_rd/platform/rdn2/platform.mk
new file mode 100644
index 000000000..31d91a13a
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn2/platform.mk
@@ -0,0 +1,21 @@
+#
+# Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include plat/arm/neoverse_rd/common/nrd_common.mk
+
+PLAT_INCLUDES += -Iplat/arm/neoverse_rd/platform/rdn2/include/
+
+PLAT_SOURCES += plat/arm/neoverse_rd/platform/rdn2/topology.c
+
+PLAT_TESTS_SKIP_LIST := plat/arm/neoverse_rd/platform/rdn2/tests_to_skip.txt
+
+RD_N2_VARIANTS := 0 1 3
+
+ifneq ($(NRD_PLATFORM_VARIANT), \
+ $(filter $(NRD_PLATFORM_VARIANT),$(RD_N2_VARIANTS)))
+ $(error "NRD_PLATFORM_VARIANT for RD-N2 should be 0, 1 or 3, currently \
+ set to ${NRD_PLATFORM_VARIANT}.")
+endif
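As a usage note (assumed invocation, not part of this patch): the RD-N2-Cfg1 variant would typically be selected with something like make PLAT=rdn2 NRD_PLATFORM_VARIANT=1, with the variant value checked against the RD_N2_VARIANTS list above; the exact command line depends on the rest of the build configuration.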
diff --git a/plat/arm/neoverse_rd/platform/rdn2/tests_to_skip.txt b/plat/arm/neoverse_rd/platform/rdn2/tests_to_skip.txt
new file mode 100644
index 000000000..2c9aceeeb
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn2/tests_to_skip.txt
@@ -0,0 +1,16 @@
+#
+# Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# OS-initiated mode is not supported on RD-N2
+PSCI CPU Suspend in OSI mode
+
+# System suspend is not supported as there are no wakeup sources in RD-N2 FVP
+PSCI STAT/Stats test cases after system suspend
+PSCI System Suspend Validation
+
+# The following test cases result in an unhandled exception at EL3
+CPU extensions/Use trace buffer control Registers
+CPU extensions/Use trace filter control Registers
diff --git a/plat/arm/neoverse_rd/platform/rdn2/topology.c b/plat/arm/neoverse_rd/platform/rdn2/topology.c
new file mode 100644
index 000000000..3cd312143
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdn2/topology.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <plat_topology.h>
+#include <tftf_lib.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} plat_cores[] = {
+ /* Cluster0: 1 core */
+ { 0, 0 },
+ /* Cluster1: 1 core */
+ { 1, 0 },
+ /* Cluster2: 1 core */
+ { 2, 0 },
+ /* Cluster3: 1 core */
+ { 3, 0 },
+ /* Cluster4: 1 core */
+ { 4, 0 },
+ /* Cluster5: 1 core */
+ { 5, 0 },
+ /* Cluster6: 1 core */
+ { 6, 0 },
+ /* Cluster7: 1 core */
+ { 7, 0 },
+#if (NRD_PLATFORM_VARIANT == 0)
+ /* Cluster8: 1 core */
+ { 8, 0 },
+ /* Cluster9: 1 core */
+ { 9, 0 },
+ /* Cluster10: 1 core */
+ { 10, 0 },
+ /* Cluster11: 1 core */
+ { 11, 0 },
+ /* Cluster12: 1 core */
+ { 12, 0 },
+ /* Cluster13: 1 core */
+ { 13, 0 },
+ /* Cluster14: 1 core */
+ { 14, 0 },
+ /* Cluster15: 1 core */
+ { 15, 0 },
+#endif
+};
+
+/*
+ * The power domain tree descriptor. The cluster power domains are
+ * arranged so that when the PSCI generic code creates the power domain tree,
+ * the indices of the CPU power domain nodes it allocates match the linear
+ * indices returned by plat_core_pos_by_mpidr().
+ */
+const unsigned char plat_pd_tree_desc[] = {
+ /* Number of root nodes */
+ PLAT_ARM_CLUSTER_COUNT,
+ /* Number of children for the 1st node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 2nd node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 3rd node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 4th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 5th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 6th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 7th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 8th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+#if (NRD_PLATFORM_VARIANT == 0)
+ /* Number of children for the 9th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 10th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 11th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 12th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 13th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 14th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 15th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 16th node */
+ NRD_MAX_CPUS_PER_CLUSTER
+#endif
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return plat_pd_tree_desc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ unsigned int mpid;
+
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ mpid = make_mpid(plat_cores[core_pos].cluster_id,
+ plat_cores[core_pos].cpu_id);
+
+ return (uint64_t)mpid;
+}
diff --git a/plat/arm/neoverse_rd/platform/rdv1/include/platform_def.h b/plat/arm/neoverse_rd/platform/rdv1/include/platform_def.h
new file mode 100644
index 000000000..6b78d9581
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdv1/include/platform_def.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <nrd_soc_platform_def.h>
+
+#define PLAT_ARM_CLUSTER_COUNT U(16)
+#define NRD_MAX_CPUS_PER_CLUSTER U(1)
+#define NRD_MAX_PE_PER_CPU U(1)
+
+/* GIC related constants */
+#define PLAT_ARM_GICD_BASE UL(0x30000000)
+#define PLAT_ARM_GICR_BASE UL(0x30140000)
+#define PLAT_ARM_GICC_BASE UL(0x2C000000)
+
+/* Platform specific page table and MMU setup constants */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 42)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 42)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/neoverse_rd/platform/rdv1/platform.mk b/plat/arm/neoverse_rd/platform/rdv1/platform.mk
new file mode 100644
index 000000000..cfb8543ca
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdv1/platform.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include plat/arm/neoverse_rd/common/nrd_common.mk
+
+PLAT_INCLUDES += -Iplat/arm/neoverse_rd/platform/rdv1/include/
+
+PLAT_SOURCES += plat/arm/neoverse_rd/platform/rdv1/topology.c
+
+PLAT_TESTS_SKIP_LIST := plat/arm/neoverse_rd/platform/rdv1/tests_to_skip.txt
+
+ifdef NRD_PLATFORM_VARIANT
+$(error "NRD_PLATFORM_VARIANT should not be set for RD-V1, \
+ currently set to ${NRD_PLATFORM_VARIANT}.")
+endif
diff --git a/plat/arm/neoverse_rd/platform/rdv1/tests_to_skip.txt b/plat/arm/neoverse_rd/platform/rdv1/tests_to_skip.txt
new file mode 100644
index 000000000..d62b9dd56
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdv1/tests_to_skip.txt
@@ -0,0 +1,22 @@
+#
+# Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# OS-initiated mode is not supported on RD-V1
+PSCI CPU Suspend in OSI mode
+
+# System suspend is not supported as there are no wakeup sources in RD-V1 FVP
+PSCI STAT/Stats test cases after system suspend
+PSCI System Suspend Validation
+
+# The following tests hang during the test execution
+Timer framework Validation/Stress test the timer framework
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend/CPU suspend to powerdown at level 0
+PSCI CPU Suspend/CPU suspend to powerdown at level 1
+
+# The following tests are not supported on RD-V1
+CPU extensions/Use trace buffer control Registers
+CPU extensions/Use trace filter control Registers
diff --git a/plat/arm/neoverse_rd/platform/rdv1/topology.c b/plat/arm/neoverse_rd/platform/rdv1/topology.c
new file mode 100644
index 000000000..882bffb58
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/rdv1/topology.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <plat_topology.h>
+#include <tftf_lib.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} plat_cores[] = {
+ /* Cluster0: 1 core */
+ { 0, 0 },
+ /* Cluster1: 1 core */
+ { 1, 0 },
+ /* Cluster2: 1 core */
+ { 2, 0 },
+ /* Cluster3: 1 core */
+ { 3, 0 },
+ /* Cluster4: 1 core */
+ { 4, 0 },
+ /* Cluster5: 1 core */
+ { 5, 0 },
+ /* Cluster6: 1 core */
+ { 6, 0 },
+ /* Cluster7: 1 core */
+ { 7, 0 },
+ /* Cluster8: 1 core */
+ { 8, 0 },
+ /* Cluster9: 1 core */
+ { 9, 0 },
+ /* Cluster10: 1 core */
+ { 10, 0 },
+ /* Cluster11: 1 core */
+ { 11, 0 },
+ /* Cluster12: 1 core */
+ { 12, 0 },
+ /* Cluster13: 1 core */
+ { 13, 0 },
+ /* Cluster14: 1 core */
+ { 14, 0 },
+ /* Cluster15: 1 core */
+ { 15, 0 },
+};
+
+/*
+ * The power domain tree descriptor. The cluster power domains are
+ * arranged so that when the PSCI generic code creates the power domain tree,
+ * the indices of the CPU power domain nodes it allocates match the linear
+ * indices returned by plat_core_pos_by_mpidr().
+ */
+const unsigned char plat_pd_tree_desc[] = {
+ /* Number of root nodes */
+ PLAT_ARM_CLUSTER_COUNT,
+ /* Number of children for the 1st node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 2nd node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 3rd node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 4th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 5th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 6th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 7th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 8th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 9th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 10th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 11th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 12th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 13th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 14th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 15th node */
+ NRD_MAX_CPUS_PER_CLUSTER,
+ /* Number of children for the 16th node */
+ NRD_MAX_CPUS_PER_CLUSTER
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return plat_pd_tree_desc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ unsigned int mpid;
+
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ mpid = make_mpid(plat_cores[core_pos].cluster_id,
+ plat_cores[core_pos].cpu_id);
+
+ return (uint64_t)mpid;
+}
diff --git a/plat/arm/neoverse_rd/platform/sgi575/include/platform_def.h b/plat/arm/neoverse_rd/platform/sgi575/include/platform_def.h
new file mode 100644
index 000000000..bdaecb933
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/sgi575/include/platform_def.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <nrd_soc_platform_def.h>
+
+#define PLAT_ARM_CLUSTER_COUNT U(2)
+#define NRD_MAX_CPUS_PER_CLUSTER U(4)
+#define NRD_MAX_PE_PER_CPU U(1)
+
+/* GIC related constants */
+#define PLAT_ARM_GICD_BASE UL(0x30000000)
+#define PLAT_ARM_GICC_BASE UL(0x2C000000)
+#define PLAT_ARM_GICR_BASE UL(0x300C0000)
+
+/* Platform specific page table and MMU setup constants */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/neoverse_rd/platform/sgi575/platform.mk b/plat/arm/neoverse_rd/platform/sgi575/platform.mk
new file mode 100644
index 000000000..5e81be595
--- /dev/null
+++ b/plat/arm/neoverse_rd/platform/sgi575/platform.mk
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include plat/arm/neoverse_rd/common/nrd_common.mk
+
+PLAT_INCLUDES += -Iplat/arm/neoverse_rd/platform/sgi575/include/
+
+PLAT_SOURCES += plat/arm/neoverse_rd/platform/sgi575/sgi575_topology.c
+
+PLAT_TESTS_SKIP_LIST := plat/arm/neoverse_rd/platform/sgi575/tests_to_skip.txt
+
+ifdef NRD_PLATFORM_VARIANT
+$(error "NRD_PLATFORM_VARIANT should not be set for SGI-575, \
+ currently set to ${NRD_PLATFORM_VARIANT}.")
+endif
diff --git a/plat/arm/sgi/sgi575/sgi575_topology.c b/plat/arm/neoverse_rd/platform/sgi575/sgi575_topology.c
index bda06d3f3..f38c1972c 100644
--- a/plat/arm/sgi/sgi575/sgi575_topology.c
+++ b/plat/arm/neoverse_rd/platform/sgi575/sgi575_topology.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -32,11 +32,11 @@ static const struct {
*/
const unsigned char sgi575_pd_tree_desc[] = {
/* Number of root nodes */
- SGI_CLUSTER_COUNT,
+ PLAT_ARM_CLUSTER_COUNT,
/* Number of children for the 1st node */
- SGI_MAX_CPUS_PER_CLUSTER,
+ NRD_MAX_CPUS_PER_CLUSTER,
/* Number of children for the 2nd node */
- SGI_MAX_CPUS_PER_CLUSTER
+ NRD_MAX_CPUS_PER_CLUSTER
};
const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
diff --git a/plat/arm/sgi/sgi575/tests_to_skip.txt b/plat/arm/neoverse_rd/platform/sgi575/tests_to_skip.txt
index 5f132f577..8817946d1 100644
--- a/plat/arm/sgi/sgi575/tests_to_skip.txt
+++ b/plat/arm/neoverse_rd/platform/sgi575/tests_to_skip.txt
@@ -1,9 +1,12 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2024, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# OS-initiated mode is not supported on SGI-575
+PSCI CPU Suspend in OSI mode
+
# System suspend is not supported as there are no wakeup sources in SGI-575 FVP
PSCI STAT/Stats test cases after system suspend
PSCI System Suspend Validation
diff --git a/plat/arm/rdinfra/rdn1edge/include/platform_def.h b/plat/arm/rdinfra/rdn1edge/include/platform_def.h
deleted file mode 100644
index 45816f5e0..000000000
--- a/plat/arm/rdinfra/rdn1edge/include/platform_def.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef PLATFORM_DEF_H
-#define PLATFORM_DEF_H
-
-#include <sgi_base_platform_def.h>
-
-#define SGI_CLUSTER_COUNT 2
-#define SGI_MAX_CPUS_PER_CLUSTER 4
-#define SGI_MAX_PE_PER_CPU 1
-
-#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/rdinfra/rdn1edge/platform.mk b/plat/arm/rdinfra/rdn1edge/platform.mk
deleted file mode 100644
index 3e25a047e..000000000
--- a/plat/arm/rdinfra/rdn1edge/platform.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-include plat/arm/sgi/common/sgi_common.mk
-
-PLAT_INCLUDES += -Iplat/arm/rdinfra/rdn1edge/include/
-
-PLAT_SOURCES += plat/arm/rdinfra/rdn1edge/topology.c
-
-PLAT_TESTS_SKIP_LIST := plat/arm/rdinfra/rdn1edge/tests_to_skip.txt
diff --git a/plat/arm/sgi/common/sgi_common.mk b/plat/arm/sgi/common/sgi_common.mk
deleted file mode 100644
index f19445fbe..000000000
--- a/plat/arm/sgi/common/sgi_common.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-PLAT_INCLUDES := -Iplat/arm/sgi/common/include/
-
-PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
- drivers/arm/gic/gic_v2.c \
- drivers/arm/gic/gic_v3.c \
- drivers/arm/sp805/sp805.c \
- drivers/arm/timer/private_timer.c \
- drivers/arm/timer/system_timer.c \
- plat/arm/sgi/common/${ARCH}/plat_helpers.S \
- plat/arm/sgi/common/plat_setup.c \
- plat/arm/sgi/common/sgi_mem_prot.c \
- plat/arm/sgi/common/sgi_pwr_state.c
-
-include plat/arm/common/arm_common.mk
-
-ifeq (${USE_NVM},1)
-$(error "USE_NVM is not supported on SGI platforms")
-endif
diff --git a/plat/arm/sgi/common/sgi_mem_prot.c b/plat/arm/sgi/common/sgi_mem_prot.c
deleted file mode 100644
index 14da4cd62..000000000
--- a/plat/arm/sgi/common/sgi_mem_prot.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <platform.h>
-
-#define SGI_DRAM1_NS_START (TFTF_BASE + 0x4000000)
-#define SGI_DRAM1_NS_SIZE 0x10000000
-
-static const mem_region_t sgi_ram_ranges[] = {
- { SGI_DRAM1_NS_START, SGI_DRAM1_NS_SIZE },
-};
-
-const mem_region_t *plat_get_prot_regions(int *nelem)
-{
- *nelem = ARRAY_SIZE(sgi_ram_ranges);
- return sgi_ram_ranges;
-}
diff --git a/plat/arm/sgi/sgi575/include/platform_def.h b/plat/arm/sgi/sgi575/include/platform_def.h
deleted file mode 100644
index 3bceec3aa..000000000
--- a/plat/arm/sgi/sgi575/include/platform_def.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef PLATFORM_DEF_H
-#define PLATFORM_DEF_H
-
-#include <sgi_base_platform_def.h>
-
-#define SGI_CLUSTER_COUNT 2
-#define SGI_MAX_CPUS_PER_CLUSTER 4
-#define SGI_MAX_PE_PER_CPU 1
-
-#endif /* PLATFORM_DEF_H */
diff --git a/plat/arm/sgi/sgi575/platform.mk b/plat/arm/sgi/sgi575/platform.mk
deleted file mode 100644
index 8472d7e0f..000000000
--- a/plat/arm/sgi/sgi575/platform.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-include plat/arm/sgi/common/sgi_common.mk
-
-PLAT_INCLUDES += -Iplat/arm/sgi/sgi575/include/
-
-PLAT_SOURCES += plat/arm/sgi/sgi575/sgi575_topology.c
-
-PLAT_TESTS_SKIP_LIST := plat/arm/sgi/sgi575/tests_to_skip.txt
diff --git a/plat/arm/tc0/aarch64/plat_helpers.S b/plat/arm/tc/aarch64/plat_helpers.S
index 863b378e7..17c3c5fdc 100644
--- a/plat/arm/tc0/aarch64/plat_helpers.S
+++ b/plat/arm/tc/aarch64/plat_helpers.S
@@ -16,15 +16,15 @@
/*----------------------------------------------------------------------
* unsigned int platform_get_core_pos(unsigned long mpid)
*
- * Function to calculate the core position on TC0 platforms.
+ * Function to calculate the core position on TC platforms.
*
- * (ClusterId * TC0_MAX_CPUS_PER_CLUSTER * TC0_MAX_PE_PER_CPU) +
- * (CPUId * TC0_MAX_PE_PER_CPU) +
+ * (ClusterId * TC_MAX_CPUS_PER_CLUSTER * TC_MAX_PE_PER_CPU) +
+ * (CPUId * TC_MAX_PE_PER_CPU) +
* ThreadId
*
* which can be simplified as:
*
- * ((ClusterId * TC0_MAX_CPUS_PER_CLUSTER + CPUId) * TC0_MAX_PE_PER_CPU)
+ * ((ClusterId * TC_MAX_CPUS_PER_CLUSTER + CPUId) * TC_MAX_PE_PER_CPU)
* + ThreadId
* ---------------------------------------------------------------------
*/
@@ -43,9 +43,9 @@ func platform_get_core_pos
ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
/* Compute linear position */
- mov x3, #TC0_MAX_CPUS_PER_CLUSTER
+ mov x3, #TC_MAX_CPUS_PER_CLUSTER
madd x1, x2, x3, x1
- mov x3, #TC0_MAX_PE_PER_CPU
+ mov x3, #TC_MAX_PE_PER_CPU
madd x0, x1, x3, x0
ret
endfunc platform_get_core_pos
diff --git a/plat/arm/tc0/include/platform_def.h b/plat/arm/tc/include/platform_def.h
index acdf370d1..82fa6c27e 100644
--- a/plat/arm/tc0/include/platform_def.h
+++ b/plat/arm/tc/include/platform_def.h
@@ -12,9 +12,9 @@
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
-#define TC0_CLUSTER_COUNT 1
-#define TC0_MAX_CPUS_PER_CLUSTER 8
-#define TC0_MAX_PE_PER_CPU 1
+#define TC_CLUSTER_COUNT 1
+#define TC_MAX_CPUS_PER_CLUSTER 8
+#define TC_MAX_PE_PER_CPU 1
/*******************************************************************************
* Run-time address of the TFTF image.
@@ -23,9 +23,9 @@
******************************************************************************/
#define TFTF_BASE 0xE0000000
-#define TC0_DRAM1_BASE 0x80000000
-#define TC0_DRAM1_SIZE 0x80000000
-#define DRAM_BASE TC0_DRAM1_BASE
+#define TC_DRAM1_BASE 0x80000000
+#define TC_DRAM1_SIZE 0x80000000
+#define DRAM_BASE TC_DRAM1_BASE
/*
* TF-A reserves DRAM space 0xFD000000 - 0xFEFFFFFF for Trusted DRAM
@@ -34,7 +34,7 @@
#define ARM_TZC_DRAM1_SIZE 0x01000000
#define ARM_TRUSTED_DRAM1_SIZE 0x02000000
-#define DRAM_SIZE (TC0_DRAM1_SIZE - \
+#define DRAM_SIZE (TC_DRAM1_SIZE - \
ARM_TRUSTED_DRAM1_SIZE - \
ARM_TZC_DRAM1_SIZE)
@@ -48,6 +48,10 @@
/* Base address of non-trusted watchdog (SP805) */
#define SP805_WDOG_BASE 0x1C0F0000
+/* Base address of trusted watchdog (SP805) */
+#define SP805_TWDOG_BASE 0x2A480000
+#define IRQ_TWDOG_INTID 86
+
/* Base address and size of external NVM flash */
#define FLASH_BASE 0x08000000
@@ -63,20 +67,20 @@
#define TFTF_NVM_SIZE 0x8000000 /* 128 MB */
/* Sub-system Peripherals */
-#define TC0_DEVICE0_BASE 0x21000000
-#define TC0_DEVICE0_SIZE 0x5f000000
+#define TC_DEVICE0_BASE 0x21000000
+#define TC_DEVICE0_SIZE 0x5f000000
/* Following covers Peripherals and PCIe expansion area */
-#define TC0_DEVICE1_BASE 0x60000000
-#define TC0_DEVICE1_SIZE 0x20000000
+#define TC_DEVICE1_BASE 0x60000000
+#define TC_DEVICE1_SIZE 0x20000000
/* GIC-600 & interrupt handling related constants */
-#define TC0_GICD_BASE 0x30000000
-#define TC0_GICR_BASE 0x30140000
-#define TC0_GICC_BASE 0x2C000000
+#define TC_GICD_BASE 0x30000000
+#define TC_GICR_BASE 0x30080000
+#define TC_GICC_BASE 0x2C000000
/* SoC's PL011 UART0 related constants */
-#define PL011_UART0_BASE 0x7FF70000
+#define PL011_UART0_BASE 0x2A400000
#define PL011_UART0_CLK_IN_HZ 7372800
/* SoC's PL011 UART1 related constants */
@@ -92,8 +96,8 @@
/* Size of coherent stacks */
#define PCPU_DV_MEM_STACK_SIZE 0x600
-#define PLATFORM_CORE_COUNT (TC0_CLUSTER_COUNT * TC0_MAX_CPUS_PER_CLUSTER)
-#define PLATFORM_NUM_AFFS (TC0_CLUSTER_COUNT + PLATFORM_CORE_COUNT)
+#define PLATFORM_CORE_COUNT (TC_CLUSTER_COUNT * TC_MAX_CPUS_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (TC_CLUSTER_COUNT + PLATFORM_CORE_COUNT)
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
#define PLAT_MAX_PWR_LEVEL PLATFORM_MAX_AFFLVL
@@ -109,7 +113,12 @@
/* Platform specific page table and MMU setup constants */
#define PLAT_PHY_ADDR_SPACE_SIZE (1ull << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ull << 36)
+
+#if IMAGE_CACTUS
+#define MAX_XLAT_TABLES 6
+#else
#define MAX_XLAT_TABLES 5
+#endif
#define MAX_MMAP_REGIONS 16
/*******************************************************************************
diff --git a/plat/arm/tc0/plat_setup.c b/plat/arm/tc/plat_setup.c
index 0294c9102..6ad8287ab 100644
--- a/plat/arm/tc0/plat_setup.c
+++ b/plat/arm/tc/plat_setup.c
@@ -8,9 +8,9 @@
#include <xlat_tables_v2.h>
static const mmap_region_t mmap[] = {
- MAP_REGION_FLAT(TC0_DEVICE0_BASE, TC0_DEVICE0_SIZE,
+ MAP_REGION_FLAT(TC_DEVICE0_BASE, TC_DEVICE0_SIZE,
MT_DEVICE | MT_RW | MT_NS),
- MAP_REGION_FLAT(TC0_DEVICE1_BASE, TC0_DEVICE1_SIZE,
+ MAP_REGION_FLAT(TC_DEVICE1_BASE, TC_DEVICE1_SIZE,
MT_DEVICE | MT_RW | MT_NS),
MAP_REGION_FLAT(DRAM_BASE, TFTF_BASE - DRAM_BASE,
MT_MEMORY | MT_RW | MT_NS),
@@ -24,5 +24,5 @@ const mmap_region_t *tftf_platform_get_mmap(void)
void plat_arm_gic_init(void)
{
- arm_gic_init(TC0_GICC_BASE, TC0_GICD_BASE, TC0_GICR_BASE);
+ arm_gic_init(TC_GICC_BASE, TC_GICD_BASE, TC_GICR_BASE);
}
diff --git a/plat/arm/tc/platform.mk b/plat/arm/tc/platform.mk
new file mode 100644
index 000000000..cec047ce7
--- /dev/null
+++ b/plat/arm/tc/platform.mk
@@ -0,0 +1,43 @@
+#
+# Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Default number of threads per CPU on FVP
+TC_MAX_PE_PER_CPU := 1
+
+# Check the PE per core count
+ifneq ($(TC_MAX_PE_PER_CPU),$(filter $(TC_MAX_PE_PER_CPU),1 2))
+$(error "Incorrect TC_MAX_PE_PER_CPU specified for TC port")
+endif
+
+# Pass TC_MAX_PE_PER_CPU to the build system
+$(eval $(call add_define,TFTF_DEFINES,TC_MAX_PE_PER_CPU))
+$(eval $(call add_define,NS_BL1U_DEFINES,TC_MAX_PE_PER_CPU))
+$(eval $(call add_define,NS_BL2U_DEFINES,TC_MAX_PE_PER_CPU))
+
+PLAT_INCLUDES += -Iplat/arm/tc/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/sp805/sp805.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/arm/timer/system_timer.c \
+ plat/arm/tc/${ARCH}/plat_helpers.S \
+ plat/arm/tc/plat_setup.c \
+ plat/arm/tc/tc_mem_prot.c \
+ plat/arm/tc/tc_pwr_state.c \
+ plat/arm/tc/tc_topology.c
+
+CACTUS_SOURCES += plat/arm/tc/${ARCH}/plat_helpers.S
+IVY_SOURCES += plat/arm/tc/${ARCH}/plat_helpers.S
+
+PLAT_TESTS_SKIP_LIST := plat/arm/tc/tests_to_skip.txt
+
+ifeq (${USE_NVM},1)
+$(error "USE_NVM is not supported on TC platforms")
+endif
+
+include plat/arm/common/arm_common.mk
diff --git a/plat/arm/tc/tc_mem_prot.c b/plat/arm/tc/tc_mem_prot.c
new file mode 100644
index 000000000..473cf44d5
--- /dev/null
+++ b/plat/arm/tc/tc_mem_prot.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform.h>
+
+#define TC_DRAM1_NS_START (TFTF_BASE + 0x4000000)
+#define TC_DRAM1_NS_SIZE 0x10000000
+
+static const mem_region_t tc_ram_ranges[] = {
+ { TC_DRAM1_NS_START, TC_DRAM1_NS_SIZE }
+};
+
+const mem_region_t *plat_get_prot_regions(int *nelem)
+{
+ *nelem = ARRAY_SIZE(tc_ram_ranges);
+ return tc_ram_ranges;
+}
diff --git a/plat/arm/tc0/tc0_pwr_state.c b/plat/arm/tc/tc_pwr_state.c
index 17d3b39e4..97fcd97b0 100644
--- a/plat/arm/tc0/tc0_pwr_state.c
+++ b/plat/arm/tc/tc_pwr_state.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,27 +7,27 @@
#include <platform.h>
#include <psci.h>
-/* State IDs for local power states on SGI platforms. */
-#define TC0_PS_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
-#define TC0_PS_RETENTION_STATE_ID 1 /* Valid for only CPUs */
-#define TC0_PS_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
+/* State IDs for local power states on TC platform. */
+#define TC_PS_RUN_STATE_ID 0 /* Valid for CPUs and Clusters */
+#define TC_PS_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define TC_PS_OFF_STATE_ID 2 /* Valid for CPUs and Clusters */
/* Suspend depth definitions for each power state */
-#define TC0_PS_RUN_DEPTH 0
-#define TC0_PS_RETENTION_DEPTH 1
-#define TC0_PS_OFF_DEPTH 2
+#define TC_PS_RUN_DEPTH 0
+#define TC_PS_RETENTION_DEPTH 1
+#define TC_PS_OFF_DEPTH 2
/* The state property array with details of idle state possible for the core */
static const plat_state_prop_t core_state_prop[] = {
- {TC0_PS_RETENTION_DEPTH, TC0_PS_RETENTION_STATE_ID,
+ {TC_PS_RETENTION_DEPTH, TC_PS_RETENTION_STATE_ID,
PSTATE_TYPE_STANDBY},
- {TC0_PS_OFF_DEPTH, TC0_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {TC_PS_OFF_DEPTH, TC_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
{0}
};
/* The state property array with details of idle state possible for the cluster */
static const plat_state_prop_t cluster_state_prop[] = {
- {TC0_PS_OFF_DEPTH, TC0_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {TC_PS_OFF_DEPTH, TC_PS_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
{0}
};
diff --git a/plat/arm/tc0/tc0_topology.c b/plat/arm/tc/tc_topology.c
index 9e30b645b..389eb1dc2 100644
--- a/plat/arm/tc0/tc0_topology.c
+++ b/plat/arm/tc/tc_topology.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,7 @@
static const struct {
unsigned int cluster_id;
unsigned int cpu_id;
-} tc0_cores[] = {
+} tc_cores[] = {
/* Cluster0: 8 cores*/
{ 0, 0 },
{ 0, 1 },
@@ -29,18 +29,18 @@ static const struct {
* the indices of the CPU power domain nodes it allocates match the linear
* indices returned by plat_core_pos_by_mpidr().
*/
-const unsigned char tc0_pd_tree_desc[] = {
+const unsigned char tc_pd_tree_desc[] = {
/* Number of root nodes */
- TC0_CLUSTER_COUNT,
+ TC_CLUSTER_COUNT,
/* Number of children for the 1st node */
- TC0_MAX_CPUS_PER_CLUSTER,
+ TC_MAX_CPUS_PER_CLUSTER,
/* Number of children for the 2nd node */
- TC0_MAX_CPUS_PER_CLUSTER
+ TC_MAX_CPUS_PER_CLUSTER
};
const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
{
- return tc0_pd_tree_desc;
+ return tc_pd_tree_desc;
}
uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
@@ -49,8 +49,8 @@ uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
assert(core_pos < PLATFORM_CORE_COUNT);
- mpid = (uint64_t)make_mpid(tc0_cores[core_pos].cluster_id,
- tc0_cores[core_pos].cpu_id);
+ mpid = (uint64_t)make_mpid(tc_cores[core_pos].cluster_id,
+ tc_cores[core_pos].cpu_id);
return mpid;
}
diff --git a/plat/arm/tc0/tests_to_skip.txt b/plat/arm/tc/tests_to_skip.txt
index f039eeda2..762bfa0fa 100644
--- a/plat/arm/tc0/tests_to_skip.txt
+++ b/plat/arm/tc/tests_to_skip.txt
@@ -4,9 +4,15 @@
# SPDX-License-Identifier: BSD-3-Clause
#
-# System suspend is not supported as there are no wakeup sources in tc0 FVP
+# Disable SMMUv3 tests
+SMMUv3 tests
+
+# OS-initiated mode is not supported
+PSCI CPU Suspend in OSI mode
# PSCI is enabled but not tested
PSCI STAT/Stats test cases after system suspend
PSCI System Suspend Validation
-PSCI NODE_HW_STATE
+
+# Disable FF-A Interrupt tests as TWDOG is not supported by the TC platform
+FF-A Interrupt
diff --git a/plat/arm/tc0/platform.mk b/plat/arm/tc0/platform.mk
deleted file mode 100644
index faf0d1953..000000000
--- a/plat/arm/tc0/platform.mk
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# Default number of threads per CPU on FVP
-TC0_MAX_PE_PER_CPU := 1
-
-# Check the PE per core count
-ifneq ($(TC0_MAX_PE_PER_CPU),$(filter $(TC0_MAX_PE_PER_CPU),1 2))
-$(error "Incorrect TC0_MAX_PE_PER_CPU specified for TC0 port")
-endif
-
-# Pass FVP_MAX_PE_PER_CPU to the build system
-$(eval $(call add_define,TFTF_DEFINES,TC0_MAX_PE_PER_CPU))
-$(eval $(call add_define,NS_BL1U_DEFINES,TC0_MAX_PE_PER_CPU))
-$(eval $(call add_define,NS_BL2U_DEFINES,TC0_MAX_PE_PER_CPU))
-
-PLAT_INCLUDES += -Iplat/arm/tc0/include/
-
-PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
- drivers/arm/gic/gic_v2.c \
- drivers/arm/gic/gic_v3.c \
- drivers/arm/sp805/sp805.c \
- drivers/arm/timer/private_timer.c \
- drivers/arm/timer/system_timer.c \
- plat/arm/tc0/${ARCH}/plat_helpers.S \
- plat/arm/tc0/plat_setup.c \
- plat/arm/tc0/tc0_mem_prot.c \
- plat/arm/tc0/tc0_pwr_state.c \
- plat/arm/tc0/tc0_topology.c
-
-CACTUS_SOURCES += plat/arm/tc0/${ARCH}/plat_helpers.S
-
-PLAT_TESTS_SKIP_LIST := plat/arm/tc0/tests_to_skip.txt
-
-ifeq (${USE_NVM},1)
-$(error "USE_NVM is not supported on TC0 platforms")
-endif
-
-include plat/arm/common/arm_common.mk
diff --git a/plat/arm/tc0/tc0_mem_prot.c b/plat/arm/tc0/tc0_mem_prot.c
deleted file mode 100644
index 06a333ec2..000000000
--- a/plat/arm/tc0/tc0_mem_prot.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <platform.h>
-
-#define TC0_DRAM1_NS_START (TFTF_BASE + 0x4000000)
-#define TC0_DRAM1_NS_SIZE 0x10000000
-
-static const mem_region_t tc0_ram_ranges[] = {
- { TC0_DRAM1_NS_START, TC0_DRAM1_NS_SIZE }
-};
-
-const mem_region_t *plat_get_prot_regions(int *nelem)
-{
- *nelem = ARRAY_SIZE(tc0_ram_ranges);
- return tc0_ram_ranges;
-}
diff --git a/plat/common/plat_topology.c b/plat/common/plat_topology.c
index 5ff7a31ec..a9b98285e 100644
--- a/plat/common/plat_topology.c
+++ b/plat/common/plat_topology.c
@@ -184,11 +184,14 @@ static void get_parent_pwr_domain_nodes(unsigned int cpu_node,
static void update_pwrlvl_limits(void)
{
int cpu_id, j, is_present;
- unsigned int nodes_idx[PLATFORM_MAX_AFFLVL] = {-1};
+ unsigned int nodes_idx[PLATFORM_MAX_AFFLVL];
unsigned int temp_index[PLATFORM_MAX_AFFLVL];
unsigned int cpu_node_offset = tftf_pwr_domain_start_idx[0];
+ for (j = 0; j < PLATFORM_MAX_AFFLVL; j++)
+ nodes_idx[j] = -1;
+
for (cpu_id = 0; cpu_id < PLATFORM_CORE_COUNT; cpu_id++) {
get_parent_pwr_domain_nodes(cpu_id + cpu_node_offset,
PLATFORM_MAX_AFFLVL,
diff --git a/plat/hisilicon/hikey960/tests.xml b/plat/hisilicon/hikey960/tests.xml
index c72b7834c..a1d874669 100644
--- a/plat/hisilicon/hikey960/tests.xml
+++ b/plat/hisilicon/hikey960/tests.xml
@@ -10,10 +10,12 @@
<!-- External references to all individual tests files. -->
<!DOCTYPE testsuites [
<!ENTITY tests-tftf-validation SYSTEM "../../../tftf/tests/tests-tftf-validation.xml">
+ <!ENTITY tests-timer-stress SYSTEM "../../../tftf/tests/tests-timer-stress.xml">
]>
<testsuites>
&tests-tftf-validation;
+ &tests-timer-stress;
</testsuites>
diff --git a/plat/nvidia/tegra186/include/platform_def.h b/plat/nvidia/tegra186/include/platform_def.h
index d21d83c0c..8ad93ad86 100644
--- a/plat/nvidia/tegra186/include/platform_def.h
+++ b/plat/nvidia/tegra186/include/platform_def.h
@@ -124,6 +124,7 @@
* IRQ value for Tegra Timer0
******************************************************************************/
#define TEGRA_RTC_IRQ U(42)
+#define IRQ_TWDOG_INTID TEGRA_RTC_IRQ
/*******************************************************************************
* Platform specific page table and MMU setup constants
diff --git a/plat/nvidia/tegra186/tests_to_skip.txt b/plat/nvidia/tegra186/tests_to_skip.txt
index 1a1849532..7086208f8 100644
--- a/plat/nvidia/tegra186/tests_to_skip.txt
+++ b/plat/nvidia/tegra186/tests_to_skip.txt
@@ -15,6 +15,9 @@ PSCI CPU Suspend/CPU suspend to standby at level 1
PSCI CPU Suspend/CPU suspend to standby at level 2
PSCI System Suspend Validation/Suspend system with cores in suspend
+# Tegra186 platforms do not support OS-initiated mode
+PSCI CPU Suspend in OSI mode
+
# Tegra186 platforms are facing problems with system suspend
PSCI System Suspend Validation
diff --git a/plat/nvidia/tegra194/include/platform_def.h b/plat/nvidia/tegra194/include/platform_def.h
index 9b27da3d1..0cd3ecdaa 100644
--- a/plat/nvidia/tegra194/include/platform_def.h
+++ b/plat/nvidia/tegra194/include/platform_def.h
@@ -125,6 +125,7 @@
* IRQ value for Tegra Timer0
******************************************************************************/
#define TEGRA_RTC_IRQ U(42)
+#define IRQ_TWDOG_INTID TEGRA_RTC_IRQ
/*******************************************************************************
* Platform specific page table and MMU setup constants
diff --git a/plat/nvidia/tegra194/tests_to_skip.txt b/plat/nvidia/tegra194/tests_to_skip.txt
index f1be76eb7..14eb0dd04 100644
--- a/plat/nvidia/tegra194/tests_to_skip.txt
+++ b/plat/nvidia/tegra194/tests_to_skip.txt
@@ -15,6 +15,9 @@ PSCI CPU Suspend/CPU suspend to standby at level 1
PSCI CPU Suspend/CPU suspend to standby at level 2
PSCI System Suspend Validation/Suspend system with cores in suspend
+# Tegra194 platforms do not support OS-initiated mode
+PSCI CPU Suspend in OSI mode
+
# Tegra194 platforms enter system suspend only from the boot core
PSCI System Suspend Validation/system suspend from all cores
diff --git a/plat/nvidia/tegra210/include/platform_def.h b/plat/nvidia/tegra210/include/platform_def.h
index f629053da..0e369b363 100644
--- a/plat/nvidia/tegra210/include/platform_def.h
+++ b/plat/nvidia/tegra210/include/platform_def.h
@@ -123,6 +123,7 @@
* IRQ value for Tegra RTC
******************************************************************************/
#define TEGRA_RTC_IRQ U(34)
+#define IRQ_TWDOG_INTID TEGRA_RTC_IRQ
/*******************************************************************************
* Platform specific page table and MMU setup constants
diff --git a/plat/nvidia/tegra210/tests_to_skip.txt b/plat/nvidia/tegra210/tests_to_skip.txt
index 62fb9580d..6c6896740 100644
--- a/plat/nvidia/tegra210/tests_to_skip.txt
+++ b/plat/nvidia/tegra210/tests_to_skip.txt
@@ -15,6 +15,9 @@ PSCI CPU Suspend/CPU suspend to standby at level 2
PSCI CPU Suspend/CPU suspend to powerdown at level 1
PSCI CPU Suspend/CPU suspend to powerdown at level 2
+# Tegra210 platforms do not support OS-initiated mode
+PSCI CPU Suspend in OSI mode
+
# Tegra210 platforms enter system suspend only from the boot core
PSCI System Suspend Validation
diff --git a/plat/xilinx/common/timer/timers.c b/plat/xilinx/common/timer/timers.c
new file mode 100644
index 000000000..f53cd84b1
--- /dev/null
+++ b/plat/xilinx/common/timer/timers.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <debug.h>
+#include <mmio.h>
+#include <platform.h>
+#include <tftf_lib.h>
+#include <timer.h>
+#include <utils_def.h>
+
+#define TTC_OFFSET_TMR_0 U(0)
+#define TTC_OFFSET_TMR_1 U(4)
+#define TTC_OFFSET_TMR_2 U(8)
+
+#define TTC_CLK_CNTRL_OFFSET U(0x00) /* Clock Control Reg, RW */
+#define TTC_CNT_CNTRL_OFFSET U(0x0C) /* Counter Control Reg, RW */
+#define TTC_COUNT_VAL_OFFSET U(0x18) /* Counter Value Reg, RO */
+#define TTC_INTR_VAL_OFFSET U(0x24) /* Interval Count Reg, RW */
+#define TTC_ISR_OFFSET U(0x54) /* Interrupt Status Reg, RO */
+#define TTC_IER_OFFSET U(0x60) /* Interrupt Enable Reg, RW */
+
+#define TTC_CNT_CNTRL_DISABLE_MASK BIT(0)
+
+#define TTC_CLK_SEL_MASK GENMASK(1, 0)
+
+#define TTC_CLK_SEL_PS_REF BIT(0)
+#define TTC_CLK_SEL_RPU_REF BIT(4)
+
+#define RET_SUCCESS U(0)
+
+/*
+ * Set up the timers to use pre-scaling, using a fixed value for now that will
+ * work across most input frequencies, though it may need to be made more dynamic.
+ */
+#define PRESCALE_EXPONENT U(16) /* 2 ^ PRESCALE_EXPONENT = PRESCALE */
+#define PRESCALE U(65536) /* The exponent must match this */
+#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1U)
+#define CLK_CNTRL_PRESCALE_EN BIT(0)
+#define CNT_CNTRL_RESET BIT(4)
+
+/* Resolution obtained as per the input clock and prescale value
+ * Clock selected : PS_REF_CLK
+ * Clock value : 33333333Hz (33.33MHz)
+ * Prescaler for TTC, N : 15 (highest)
+ * Prescaler applied, 2^(N+1) : 65536
+ * Input clock (PS_REF_CLK / Prescaler) : 508.6263Hz
+ * Resolution (1/InputClock) : 1.966 milliseconds, ~2ms
+ */
+const unsigned long INTERVAL = 2;
+
+static void timer_write_32(uint32_t offset, uint32_t val)
+{
+ /* actual write */
+ mmio_write_32(SYS_CNT_BASE1 + offset, val);
+}
+
+static uint32_t timer_read_32(uint32_t offset)
+{
+ /* actual read */
+ return mmio_read_32(SYS_CNT_BASE1 + offset);
+}
+
+static int cancel_timer(void)
+{
+ /* Disable Interrupt */
+ timer_write_32(TTC_IER_OFFSET, 0);
+
+ /* Disable Counter */
+ timer_write_32(TTC_CLK_CNTRL_OFFSET, !CLK_CNTRL_PRESCALE_EN);
+ timer_write_32(TTC_CNT_CNTRL_OFFSET, !CLK_CNTRL_PRESCALE_EN);
+
+ return RET_SUCCESS;
+}
+
+static void clocksetup(void)
+{
+ timer_write_32(TTC_OFFSET_TMR_0 + TTC_CLK_CNTRL_OFFSET, 0x0);
+
+ mmio_write_32(LPD_IOU_SLCR + TTC_CLK_SEL_OFFSET, TTC_CLK_SEL_PS_REF);
+
+ VERBOSE("%s TTC_CLK_SEL = 0x%x\n", __func__,
+ mmio_read_32(LPD_IOU_SLCR + TTC_CLK_SEL_OFFSET));
+}
+
+static void setcounts(unsigned long time_out_ms)
+{
+ unsigned long intrvl = (time_out_ms / INTERVAL) + (time_out_ms % INTERVAL);
+
+ timer_write_32(TTC_INTR_VAL_OFFSET, intrvl);
+}
+
+static int program_timer(unsigned long time_out_ms)
+{
+ uint32_t reg;
+
+ /* Disable and program the counter */
+ reg = timer_read_32(TTC_CNT_CNTRL_OFFSET);
+ reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+ timer_write_32(TTC_CNT_CNTRL_OFFSET, reg);
+
+ setcounts(time_out_ms);
+
+ /* Enable the interrupt */
+ timer_write_32(TTC_IER_OFFSET, 0x01);
+
+ /* Enable the counter */
+ reg |= CNT_CNTRL_RESET;
+ reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+ timer_write_32(TTC_CNT_CNTRL_OFFSET, reg);
+
+ return RET_SUCCESS;
+}
+
+static int handler_timer(void)
+{
+ uint32_t status;
+
+ /* Disable the interrupts */
+ timer_write_32(TTC_IER_OFFSET, 0x00);
+
+ status = timer_read_32(TTC_ISR_OFFSET);
+ if (status & 0x1)
+ INFO("Timer Event! %x\n", status);
+ else
+ ERROR("It's not a Timer Event %d\n", status);
+
+ return RET_SUCCESS;
+}
+
+static const plat_timer_t timers = {
+ .program = program_timer,
+ .cancel = cancel_timer,
+ .handler = handler_timer,
+ .timer_step_value = INTERVAL,
+ .timer_irq = TTC_TIMER_IRQ
+};
+
+int plat_initialise_timer_ops(const plat_timer_t **timer_ops)
+{
+ assert(timer_ops != NULL);
+
+ /* Disable all Interrupts on the TTC */
+ timer_write_32(TTC_OFFSET_TMR_0 + TTC_IER_OFFSET, 0);
+ timer_write_32(TTC_OFFSET_TMR_1 + TTC_IER_OFFSET, 0);
+ timer_write_32(TTC_OFFSET_TMR_2 + TTC_IER_OFFSET, 0);
+
+ clocksetup();
+
+ /*
+ * Setup the clock event timer to be an interval timer which
+ * is prescaled by 32 using the interval interrupt. Leave it
+ * disabled for now.
+ */
+ timer_write_32(TTC_OFFSET_TMR_0 + TTC_CNT_CNTRL_OFFSET, 0x23);
+ timer_write_32(TTC_OFFSET_TMR_0 + TTC_CLK_CNTRL_OFFSET,
+ CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN);
+ timer_write_32(TTC_OFFSET_TMR_0 + TTC_IER_OFFSET, 0x01);
+
+ *timer_ops = &timers;
+
+ return RET_SUCCESS;
+}
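
For readers following the TTC driver above: the interval value programmed by setcounts() follows directly from the ~2 ms tick resolution derived in the comment block (33.33 MHz PS_REF_CLK divided by a 2^16 prescaler). The standalone C sketch below mirrors that computation; the helper name ttc_ms_to_ticks and the main() harness are illustrative only and not part of the driver.

    #include <stdio.h>

    /* One TTC tick with PS_REF_CLK = 33333333 Hz and a 2^16 prescaler:
     * 1 / (33333333 / 65536) ~= 1.966 ms, rounded to 2 ms (INTERVAL above).
     */
    #define TTC_INTERVAL_MS 2UL

    /* Mirrors setcounts(): the driver programs
     * (time_out_ms / INTERVAL) + (time_out_ms % INTERVAL) into TTC_INTR_VAL.
     */
    static unsigned long ttc_ms_to_ticks(unsigned long time_out_ms)
    {
        return (time_out_ms / TTC_INTERVAL_MS) + (time_out_ms % TTC_INTERVAL_MS);
    }

    int main(void)
    {
        /* A 10 ms timeout maps to 5 ticks of ~2 ms each. */
        printf("10 ms -> %lu ticks\n", ttc_ms_to_ticks(10));
        return 0;
    }
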
diff --git a/plat/xilinx/versal/aarch64/plat_helpers.S b/plat/xilinx/versal/aarch64/plat_helpers.S
new file mode 100644
index 000000000..b8f1109ae
--- /dev/null
+++ b/plat/xilinx/versal/aarch64/plat_helpers.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .global platform_get_core_pos
+ .global plat_crash_console_init
+ .global plat_crash_console_flush
+ .global plat_crash_console_putc
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(u_register_t mpid)
+ *
+ * Function to calculate the core position.
+ *
+ * clobbers: x0 - x3
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /* x1 = core-id inside cluster */
+ ubfx x1, x0, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ /* x2 = cluster-id */
+ ubfx x2, x0, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* core-position = cluster-id * cores per cluster + core-id */
+ mov x3, #PLATFORM_CORE_COUNT_PER_CLUSTER
+ madd x0, x2, x3, x1
+ ret
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, CRASH_CONSOLE_BASE
+ mov_imm x1, PL011_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, CRASH_CONSOLE_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, CRASH_CONSOLE_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
diff --git a/plat/xilinx/versal/include/platform_def.h b/plat/xilinx/versal/include/platform_def.h
new file mode 100644
index 000000000..73b6db21d
--- /dev/null
+++ b/plat/xilinx/versal/include/platform_def.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define TFTF_BASE U(0x8000000)
+
+#define CACHE_WRITEBACK_GRANULE U(0x40)
+
+#define PLATFORM_CLUSTER_COUNT 1
+#define PLATFORM_CORE_COUNT_PER_CLUSTER 2
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (PLATFORM_CORE_COUNT + \
+ PLATFORM_CLUSTER_COUNT + 1)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+
+#define PLATFORM_STACK_SIZE U(0x880)
+#define PCPU_DV_MEM_STACK_SIZE U(0x440)
+
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define MAX_XLAT_TABLES U(8)
+#define MAX_MMAP_REGIONS U(16)
+
+#define DRAM_BASE U(0x0)
+#define DRAM_SIZE U(0x80000000)
+
+/*
+ * TFTF_NVM_OFFSET/SIZE correspond to the NVM partition in the partition
+ * table
+ */
+#define TFTF_NVM_SIZE U(0x600000)
+#define TFTF_NVM_OFFSET U(0x20000000)
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH U(4)
+
+/* GIC related addresses from datasheet */
+#define GICD_REG_BASE U(0xf9000000)
+#define GICC_REG_BASE U(0xf9040000)
+#define GICR_REG_BASE U(0xf9080000)
+
+/*
+ * Memory-mapped devices for which we must create MMU mappings
+ */
+#define GIC_BASE GICD_REG_BASE
+#define GIC_SIZE U(0x01000000)
+
+#define TTC_BASE U(0xff0e0000)
+#define TTC_SIZE U(0x00010000)
+
+#define SYS_CNT_BASE1 TTC_BASE
+#define SYS_CNT_SIZE TTC_SIZE
+
+#define LPD_IOU_SLCR U(0xff080000)
+#define LPD_IOU_SLCR_SIZE U(0x00010000)
+
+/* ARM PL011 UART */
+#define PL011_UART0_BASE U(0xff000000)
+#define PL011_BAUDRATE U(115200)
+#define PL011_UART_CLK_IN_HZ U(100000000)
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_SIZE U(0x1000)
+
+#define CRASH_CONSOLE_BASE PL011_UART0_BASE
+#define CRASH_CONSOLE_SIZE PLAT_ARM_UART_SIZE
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupts IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER U(29)
+/* Datasheet: TIME00 event */
+#define IRQ_CNTPSIRQ1 U(29)
+
+/* Refer to AM011(v1.5), Chapter 50, Page 430 */
+#define PLAT_MAX_SPI_OFFSET_ID U(223)
+
+/*
+ * Times (in ms) used by test code for completion of different events.
+ */
+#define PLAT_SUSPEND_ENTRY_TIME U(15)
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME U(30)
+
+/*
+ * Dummy definitions that we need just to compile...
+ */
+#define ARM_SECURE_SERVICE_BUFFER_BASE U(0)
+#define ARM_SECURE_SERVICE_BUFFER_SIZE U(100)
+
+/* LPD_SWDT_INT, AM011(v1.5), Chapter 50, Page 428 */
+#define IRQ_TWDOG_INTID U(0x51)
+
+#define TTC_TIMER_IRQ U(69)
+#define TTC_CLK_SEL_OFFSET U(0x360)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/xilinx/versal/platform.mk b/plat/xilinx/versal/platform.mk
new file mode 100644
index 000000000..2161e8944
--- /dev/null
+++ b/plat/xilinx/versal/platform.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLNX_COMMON_PATH := plat/xilinx/common
+VERSAL_PATH := plat/xilinx/versal
+
+PLAT_INCLUDES := -I${VERSAL_PATH}/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_common.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/pl011/${ARCH}/pl011_console.S \
+ drivers/arm/timer/private_timer.c \
+ drivers/console/console.c \
+ ${VERSAL_PATH}/versal_setup.c \
+ ${VERSAL_PATH}/versal_pwr_state.c \
+ ${VERSAL_PATH}/aarch64/plat_helpers.S \
+ ${XLNX_COMMON_PATH}/timer/timers.c
+
+PLAT_TESTS_SKIP_LIST := ${VERSAL_PATH}/tests_to_skip.txt
+
+ifeq ($(USE_NVM),1)
+$(error "Versal port of TFTF doesn't currently support USE_NVM=1")
+endif
diff --git a/plat/xilinx/versal/tests_to_skip.txt b/plat/xilinx/versal/tests_to_skip.txt
new file mode 100644
index 000000000..87b9e41a2
--- /dev/null
+++ b/plat/xilinx/versal/tests_to_skip.txt
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+################################################################################
+# Disable the listed tests for Versal Platform.
+################################################################################
+
+#TESTS: tftf-validation
+Framework Validation/Events API
+Timer framework Validation/Target timer to a power down cpu
+Timer framework Validation/Test scenario where multiple CPUs call same timeout
+
+#TESTS: Boot requirement tests
+Boot requirement tests
+
+#TESTS: psci
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend/CPU suspend to powerdown at level 0
+PSCI CPU Suspend/CPU suspend to powerdown at level 1
+PSCI CPU Suspend/CPU suspend to powerdown at level 2
+PSCI CPU Suspend/CPU suspend to standby at level 0
+PSCI CPU Suspend/CPU suspend to standby at level 1
+PSCI CPU Suspend/CPU suspend to standby at level 2
+PSCI CPU Suspend in OSI mode
+CPU Hotplug/Invalid entry point
+PSCI System Suspend Validation/System suspend multiple times
+PSCI System Suspend Validation/system suspend from all cores
+PSCI System Suspend Validation/Validate suspend to RAM functionality
+
+#TESTS: el3-power-state
+EL3 power state parser validation/Create all power states and validate EL3 power state parsing
+EL3 power state parser validation/Create invalid local power state at all levels and validate EL3 power state parsing
+EL3 power state parser validation/Create invalid power state type and validate EL3 power state parsing
+EL3 power state parser validation/Create a power state with valid and invalid local state ID at different levels and validate power state parsing
+
+#TESTS: psci-extensive
+PSCI CPU ON OFF Stress Tests/PSCI CPU ON OFF stress test
+PSCI CPU ON OFF Stress Tests/Repeated hotplug of all cores to stress test CPU_ON and CPU_OFF
+PSCI CPU ON OFF Stress Tests/Random hotplug cores in a large iteration to stress boot path code
+
+#TESTS: TSP
+IRQ support in TSP/Resume preempted STD SMC after PSCI CPU OFF/ON cycle
+IRQ support in TSP/Resume preempted STD SMC after PSCI SYSTEM SUSPEND
+IRQ support in TSP/Resume preempted STD SMC
+TSP PSTATE test
+
+#TESTS: runtime-instrumentation
+Runtime Instrumentation Validation
+
+#TESTS: debugfs
+DebugFS
diff --git a/plat/xilinx/versal/versal_pwr_state.c b/plat/xilinx/versal/versal_pwr_state.c
new file mode 100644
index 000000000..74c43c026
--- /dev/null
+++ b/plat/xilinx/versal/versal_pwr_state.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * State IDs for local power states.
+ */
+#define VERSAL_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define VERSAL_OFF_STATE_ID 0 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ VERSAL_RUN_DEPTH = 0,
+ VERSAL_RETENTION_DEPTH,
+ VERSAL_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {VERSAL_RETENTION_DEPTH, VERSAL_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {VERSAL_OFF_DEPTH, VERSAL_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {VERSAL_OFF_DEPTH, VERSAL_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the system level */
+static const plat_state_prop_t system_state_prop[] = {
+ {VERSAL_OFF_DEPTH, VERSAL_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
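
The state property tables above only list the local state ID and suspend depth per level; the framework assembles these into the power_state argument passed to PSCI. As a rough illustration (not the TFTF framework code), packing one local state ID per level with the 4-bit PLAT_LOCAL_PSTATE_WIDTH from platform_def.h could look like the sketch below; the function name and the exact composite layout are assumptions for illustration only.

    #include <stdint.h>

    #define LOCAL_PSTATE_WIDTH  4U  /* PLAT_LOCAL_PSTATE_WIDTH above */

    /* Illustrative only: pack one local state ID per affinity level into a
     * composite state-ID field, 4 bits per level (extended state-ID style).
     */
    static uint32_t compose_state_id(const uint32_t *local_ids, unsigned int levels)
    {
        uint32_t state_id = 0U;

        for (unsigned int lvl = 0U; lvl < levels; lvl++)
            state_id |= local_ids[lvl] << (lvl * LOCAL_PSTATE_WIDTH);

        return state_id;
    }
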
diff --git a/plat/xilinx/versal/versal_setup.c b/plat/xilinx/versal/versal_setup.c
new file mode 100644
index 000000000..35589ab4a
--- /dev/null
+++ b/plat/xilinx/versal/versal_setup.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <drivers/arm/arm_gic.h>
+#include <drivers/console.h>
+#include <platform.h>
+#include <tftf_lib.h>
+
+#include <platform_def.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} versal_cores[PLATFORM_CORE_COUNT] = {
+ { 0, 0 },
+ { 0, 1 }
+};
+
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE + TFTF_NVM_OFFSET, TFTF_NVM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(CRASH_CONSOLE_BASE, CRASH_CONSOLE_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(TTC_BASE, TTC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(LPD_IOU_SLCR, LPD_IOU_SLCR_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+/* Power Domain Tree Descriptor array */
+const unsigned char versal_pwr_tree_desc[] = {
+ /* Number of root nodes */
+ 1,
+ /* Number of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* Number of children for the first cluster node */
+ PLATFORM_CORE_COUNT_PER_CLUSTER
+};
+
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return versal_pwr_tree_desc;
+}
+
+/*
+ * Generate the MPID from the core position.
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return (uint64_t)make_mpid(versal_cores[core_pos].cluster_id,
+ versal_cores[core_pos].cpu_id);
+}
+
+void tftf_plat_arch_setup(void)
+{
+ tftf_plat_configure_mmu();
+}
+
+void tftf_early_platform_setup(void)
+{
+ console_init(CRASH_CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+}
+
+void tftf_platform_setup(void)
+{
+ arm_gic_init(GICC_REG_BASE, GICD_REG_BASE, GICR_REG_BASE);
+ arm_gic_setup_global();
+ arm_gic_setup_local();
+}
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
diff --git a/plat/xilinx/versal_net/aarch64/plat_helpers.S b/plat/xilinx/versal_net/aarch64/plat_helpers.S
new file mode 100644
index 000000000..698a8bc97
--- /dev/null
+++ b/plat/xilinx/versal_net/aarch64/plat_helpers.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .global platform_get_core_pos
+ .global plat_crash_console_init
+ .global plat_crash_console_flush
+ .global plat_crash_console_putc
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(u_register_t mpid)
+ *
+ * Function to calculate the core position.
+ *
+ * clobbers: x0 - x3
+ * ---------------------------------------------------------------------
+ */
+func platform_get_core_pos
+ /*
+ * Check for MT bit in MPIDR. If not set, shift MPIDR to left to make it
+ * look as if in a multi-threaded implementation.
+ */
+ tst x0, #MPIDR_MT_MASK
+ lsl x3, x0, #MPIDR_AFFINITY_BITS
+ csel x3, x3, x0, eq
+
+ /* x1 = core-id inside cluster */
+ ubfx x0, x3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
+ ubfx x1, x3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
+ /* x2 = cluster-id */
+ ubfx x2, x3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
+
+ /* check if cpu_id valid */
+ cmp x1, #PLATFORM_CORE_COUNT_PER_CLUSTER
+ b.hi error
+
+ /* check if cluster valid */
+ cmp x2, #PLATFORM_CLUSTER_COUNT
+ b.hi error
+
+ /* core-position = cluster-id * cores per cluster + core-id */
+ mov x3, #PLATFORM_CORE_COUNT_PER_CLUSTER
+ madd x1, x2, x3, x1
+ mov x3, #PLATFORM_MAX_PE_PER_CPU
+ madd x0, x1, x3, x0
+ ret
+error:
+ mov x0, #-1
+ ret
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, CRASH_CONSOLE_BASE
+ mov_imm x1, PL011_UART_CLK_IN_HZ
+ mov_imm x2, PL011_BAUDRATE
+ b console_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, CRASH_CONSOLE_BASE
+ b console_core_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, CRASH_CONSOLE_BASE
+ b console_core_flush
+endfunc plat_crash_console_flush
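
Versal NET cores can expose the MT bit, so platform_get_core_pos above first normalises a non-MT MPIDR by shifting it one affinity field to the left before extracting thread, core and cluster. A C rendering of the same computation is sketched below, with the bounds checks omitted; the constants mirror the architecture and the platform_def.h added further down.

    #include <stdint.h>

    #define MPIDR_MT_MASK        (1ULL << 24)
    #define MPIDR_AFFINITY_BITS  8U
    #define CORES_PER_CLUSTER    4U  /* PLATFORM_CORE_COUNT_PER_CLUSTER */
    #define MAX_PE_PER_CPU       1U  /* PLATFORM_MAX_PE_PER_CPU */

    /* Mirrors the assembly above: normalise non-MT MPIDRs, then compute
     * ((cluster * cores-per-cluster) + core) * PEs-per-core + thread.
     */
    static unsigned int core_pos_from_mpidr(uint64_t mpidr)
    {
        unsigned int thread, core, cluster;

        if ((mpidr & MPIDR_MT_MASK) == 0ULL)
            mpidr <<= MPIDR_AFFINITY_BITS;

        thread  = (unsigned int)(mpidr >> 0)  & 0xffU;
        core    = (unsigned int)(mpidr >> 8)  & 0xffU;
        cluster = (unsigned int)(mpidr >> 16) & 0xffU;

        return ((cluster * CORES_PER_CLUSTER) + core) * MAX_PE_PER_CPU + thread;
    }
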
diff --git a/plat/xilinx/versal_net/include/platform_def.h b/plat/xilinx/versal_net/include/platform_def.h
new file mode 100644
index 000000000..92a7ba073
--- /dev/null
+++ b/plat/xilinx/versal_net/include/platform_def.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define TFTF_BASE U(0x8000000)
+
+#define CACHE_WRITEBACK_GRANULE U(0x40)
+
+#define PLATFORM_CLUSTER_COUNT U(4)
+#define PLATFORM_CORE_COUNT_PER_CLUSTER U(4)
+#define PLATFORM_MAX_PE_PER_CPU U(1)
+/* Because of make_mpid from include/lib/tftf_lib.h */
+#define PLAT_MAX_PE_PER_CPU PLATFORM_MAX_PE_PER_CPU
+
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLATFORM_NUM_AFFS (PLATFORM_CORE_COUNT + \
+ PLATFORM_CLUSTER_COUNT + 1)
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_STATES_PER_LVL U(2)
+
+
+#define PLATFORM_STACK_SIZE U(0x440)
+#define PCPU_DV_MEM_STACK_SIZE U(0x440)
+
+
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 32)
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 32)
+#define MAX_XLAT_TABLES U(8)
+#define MAX_MMAP_REGIONS U(16)
+
+#define DRAM_BASE U(0x0)
+#define DRAM_SIZE U(0x80000000)
+
+/*
+ * TFTF_NVM_OFFSET/SIZE correspond to the NVM partition in the partition
+ * table
+ */
+#define TFTF_NVM_SIZE U(0x600000)
+#define TFTF_NVM_OFFSET U(0x20000000)
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH U(4)
+
+/* GIC related addresses from datasheet */
+#define GICD_REG_BASE U(0xe2000000)
+#define GICR_REG_BASE U(0xe2060000)
+
+/* GICv3 is used, dummy definition to resolve build failure */
+#define GICC_REG_BASE U(0xe2000000)
+
+/*
+ * Memory-mapped devices for which we must create MMU mappings
+ */
+#define GIC_BASE GICD_REG_BASE
+#define GIC_SIZE U(0x01000000)
+
+#define TTC_BASE U(0xF1DC0000)
+#define TTC_SIZE U(0x00010000)
+
+#define SYS_CNT_BASE1 TTC_BASE
+#define SYS_CNT_SIZE TTC_SIZE
+
+#define LPD_IOU_SLCR U(0xF19A0000)
+#define LPD_IOU_SLCR_SIZE U(0x00010000)
+
+/* ARM PL011 UART */
+#define PL011_UART0_BASE U(0xf1920000)
+#define PL011_BAUDRATE U(115200)
+#define PL011_UART_CLK_IN_HZ U(100000000)
+
+#define PLAT_ARM_UART_BASE PL011_UART0_BASE
+#define PLAT_ARM_UART_SIZE U(0x1000)
+
+#define CRASH_CONSOLE_BASE PL011_UART0_BASE
+#define CRASH_CONSOLE_SIZE PLAT_ARM_UART_SIZE
+
+/*******************************************************************************
+ * Non-Secure Software Generated Interrupts IDs
+ ******************************************************************************/
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+/* Per-CPU Hypervisor Timer Interrupt ID */
+#define IRQ_PCPU_HP_TIMER U(29)
+/* Datasheet: TIME00 event */
+#define IRQ_CNTPSIRQ1 U(29)
+
+/* Refer to AM011(v1.5), Chapter 50, Page 430 */
+#define PLAT_MAX_SPI_OFFSET_ID U(223)
+
+/*
+ * Times (in ms) used by test code for completion of different events.
+ */
+#define PLAT_SUSPEND_ENTRY_TIME U(15)
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME U(30)
+
+/*
+ * Dummy definitions that we need just to compile...
+ */
+#define ARM_SECURE_SERVICE_BUFFER_BASE U(0)
+#define ARM_SECURE_SERVICE_BUFFER_SIZE U(100)
+
+/* LPD_SWDT_INT, AM011(v1.5), Chapter 50, Page 428 */
+#define IRQ_TWDOG_INTID U(0x51)
+
+#define TTC_TIMER_IRQ U(75)
+#define TTC_CLK_SEL_OFFSET U(0x360)
+
+#endif /* PLATFORM_DEF_H */
diff --git a/plat/xilinx/versal_net/include/util.h b/plat/xilinx/versal_net/include/util.h
new file mode 100644
index 000000000..840c14fbf
--- /dev/null
+++ b/plat/xilinx/versal_net/include/util.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <platform_def.h>
+
+#define CPU_DEF(cluster, cpu) { cluster, cpu }
+
+#if (PLATFORM_CORE_COUNT_PER_CLUSTER == 1U)
+#define CLUSTER_DEF(cluster) \
+ CPU_DEF(cluster, 0)
+#elif (PLATFORM_CORE_COUNT_PER_CLUSTER == 2U)
+#define CLUSTER_DEF(cluster) \
+ CPU_DEF(cluster, 0), \
+ CPU_DEF(cluster, 1)
+#elif (PLATFORM_CORE_COUNT_PER_CLUSTER == 4U)
+#define CLUSTER_DEF(cluster) \
+ CPU_DEF(cluster, 0), \
+ CPU_DEF(cluster, 1), \
+ CPU_DEF(cluster, 2), \
+ CPU_DEF(cluster, 3)
+#endif
+
+#endif /* UTIL_H */
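
With PLATFORM_CORE_COUNT_PER_CLUSTER set to 4, as on Versal NET, the CLUSTER_DEF() helper above lets the setup code describe a whole cluster per line. Purely for illustration, the four CLUSTER_DEF() entries used later in versal_net_setup.c expand to the explicit initializer below.

    /* CLUSTER_DEF(0) .. CLUSTER_DEF(3) with 4 cores per cluster expand to: */
    static const struct {
        unsigned int cluster_id;
        unsigned int cpu_id;
    } versal_net_cores_expanded[] = {
        { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 },
        { 1, 0 }, { 1, 1 }, { 1, 2 }, { 1, 3 },
        { 2, 0 }, { 2, 1 }, { 2, 2 }, { 2, 3 },
        { 3, 0 }, { 3, 1 }, { 3, 2 }, { 3, 3 },
    };
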
diff --git a/plat/xilinx/versal_net/platform.mk b/plat/xilinx/versal_net/platform.mk
new file mode 100644
index 000000000..90d6d7361
--- /dev/null
+++ b/plat/xilinx/versal_net/platform.mk
@@ -0,0 +1,28 @@
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLNX_COMMON_PATH := plat/xilinx/common
+VERSAL_NET_PATH := plat/xilinx/versal_net
+
+PLAT_INCLUDES := -I${VERSAL_NET_PATH}/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2v3.c \
+ drivers/arm/gic/gic_common.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/gic/gic_v3.c \
+ drivers/arm/pl011/${ARCH}/pl011_console.S \
+ drivers/arm/timer/private_timer.c \
+ drivers/console/console.c \
+ ${VERSAL_NET_PATH}/versal_net_setup.c \
+ ${VERSAL_NET_PATH}/versal_net_pwr_state.c \
+ ${VERSAL_NET_PATH}/aarch64/plat_helpers.S \
+ ${XLNX_COMMON_PATH}/timer/timers.c
+
+PLAT_TESTS_SKIP_LIST := ${VERSAL_NET_PATH}/tests_to_skip.txt
+
+ifeq ($(USE_NVM),1)
+$(error "Versal NET port of TFTF doesn't currently support USE_NVM=1")
+endif
diff --git a/plat/xilinx/versal_net/tests_to_skip.txt b/plat/xilinx/versal_net/tests_to_skip.txt
new file mode 100644
index 000000000..d5c3a3930
--- /dev/null
+++ b/plat/xilinx/versal_net/tests_to_skip.txt
@@ -0,0 +1,68 @@
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+################################################################################
+# Disable the listed tests for Versal NET Platform.
+################################################################################
+
+#TESTS: tftf-validation
+Framework Validation/Events API
+Framework Validation/NVM serialisation
+
+#TESTS: Timer framework Validation
+Timer framework Validation
+
+#TESTS: Boot requirement tests
+Boot requirement tests
+
+#TESTS: CPU Hotplug
+CPU Hotplug/CPU hotplug
+CPU Hotplug/CPU already on
+CPU Hotplug/Context ID passing
+CPU Hotplug/Invalid entry point
+
+#TESTS: PSCI System Suspend Validation
+PSCI System Suspend Validation/System suspend with cores on
+PSCI System Suspend Validation/Suspend system with cores in suspend
+
+#TESTS: CPU extensions
+CPU extensions/PMUv3 SMC counter preservation
+
+#TESTS: Performance tests
+Performance tests/Test cluster power up latency
+
+#TESTS: FF-A
+FF-A Setup and Discovery/FF-A RXTX unmap SP rxtx buffer
+FF-A Setup and Discovery/Test FFA_PARTITION_INFO_GET v1.0
+FF-A Memory Sharing/Lend memory, clear flag set
+SIMD,SVE Registers context/Check that SIMD registers context is preserved
+FF-A Interrupt
+FF-A Notifications
+
+#TESTS: AMD-Xilinx tests
+AMD-Xilinx tests
+
+#TESTS: psci
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend/CPU suspend to powerdown at level 0
+PSCI CPU Suspend/CPU suspend to powerdown at level 1
+PSCI CPU Suspend/CPU suspend to powerdown at level 2
+PSCI CPU Suspend/CPU suspend to standby at level 0
+PSCI CPU Suspend/CPU suspend to standby at level 1
+PSCI CPU Suspend/CPU suspend to standby at level 2
+PSCI CPU Suspend in OSI mode
+PSCI System Suspend Validation/System suspend multiple times
+PSCI System Suspend Validation/system suspend from all cores
+PSCI System Suspend Validation/Validate suspend to RAM functionality
+
+#TESTS: el3-power-state
+EL3 power state parser validation
+
+#TESTS: runtime-instrumentation
+Runtime Instrumentation Validation
+
+#TESTS: debugfs
+DebugFS
diff --git a/plat/xilinx/versal_net/versal_net_pwr_state.c b/plat/xilinx/versal_net/versal_net_pwr_state.c
new file mode 100644
index 000000000..a87331eb3
--- /dev/null
+++ b/plat/xilinx/versal_net/versal_net_pwr_state.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * State IDs for local power states.
+ */
+#define VERSAL_NET_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define VERSAL_NET_OFF_STATE_ID 0 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ VERSAL_NET_RUN_DEPTH = 0,
+ VERSAL_NET_RETENTION_DEPTH,
+ VERSAL_NET_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {VERSAL_NET_RETENTION_DEPTH, VERSAL_NET_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {VERSAL_NET_OFF_DEPTH, VERSAL_NET_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {VERSAL_NET_OFF_DEPTH, VERSAL_NET_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the system level */
+static const plat_state_prop_t system_state_prop[] = {
+ {VERSAL_NET_OFF_DEPTH, VERSAL_NET_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
diff --git a/plat/xilinx/versal_net/versal_net_setup.c b/plat/xilinx/versal_net/versal_net_setup.c
new file mode 100644
index 000000000..40fe2c4f5
--- /dev/null
+++ b/plat/xilinx/versal_net/versal_net_setup.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch.h>
+#include <drivers/arm/arm_gic.h>
+#include <drivers/console.h>
+#include <platform.h>
+#include <tftf_lib.h>
+
+#include <platform_def.h>
+#include <util.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} versal_net_cores[PLATFORM_CORE_COUNT] = {
+ CLUSTER_DEF(0),
+ CLUSTER_DEF(1),
+ CLUSTER_DEF(2),
+ CLUSTER_DEF(3)
+};
+
+
+static const mmap_region_t mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE + TFTF_NVM_OFFSET, TFTF_NVM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(CRASH_CONSOLE_BASE, CRASH_CONSOLE_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(TTC_BASE, TTC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(LPD_IOU_SLCR, LPD_IOU_SLCR_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+/* Power Domain Tree Descriptor array */
+const unsigned char versal_net_pwr_tree_desc[] = {
+ /* Number of root nodes */
+ 1,
+ /* Number of clusters */
+ PLATFORM_CLUSTER_COUNT,
+ /* Number of children for the first cluster node */
+ PLATFORM_CORE_COUNT_PER_CLUSTER,
+ PLATFORM_CORE_COUNT_PER_CLUSTER,
+ PLATFORM_CORE_COUNT_PER_CLUSTER,
+ PLATFORM_CORE_COUNT_PER_CLUSTER
+};
+
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return versal_net_pwr_tree_desc;
+}
+
+/*
+ * Generate the MPID from the core position.
+ */
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return (uint64_t)make_mpid(versal_net_cores[core_pos].cluster_id,
+ versal_net_cores[core_pos].cpu_id);
+}
+
+void tftf_plat_arch_setup(void)
+{
+ tftf_plat_configure_mmu();
+}
+
+void tftf_early_platform_setup(void)
+{
+ console_init(CRASH_CONSOLE_BASE, PL011_UART_CLK_IN_HZ, PL011_BAUDRATE);
+}
+
+void tftf_platform_setup(void)
+{
+ arm_gic_init(GICC_REG_BASE, GICD_REG_BASE, GICR_REG_BASE);
+ arm_gic_setup_global();
+ arm_gic_setup_local();
+}
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return mmap;
+}
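
The versal_net_pwr_tree_desc array above encodes the topology as: one root node, then the number of clusters, then one entry per cluster giving its core count. The small walker below, illustrative rather than the framework's own parser, shows how the 16-core total falls out.

    #include <stdio.h>

    /* Same layout as versal_net_pwr_tree_desc above:
     * { roots, clusters, cores-in-cluster-0, ..., cores-in-cluster-N-1 }
     */
    static const unsigned char pwr_tree[] = { 1, 4, 4, 4, 4, 4 };

    int main(void)
    {
        unsigned int clusters = pwr_tree[1];
        unsigned int total = 0;

        for (unsigned int c = 0; c < clusters; c++)
            total += pwr_tree[2 + c];

        /* Prints 16, matching PLATFORM_CORE_COUNT for Versal NET. */
        printf("cores: %u\n", total);
        return 0;
    }
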
diff --git a/plat/xilinx/zynqmp/aarch64/plat_helpers.S b/plat/xilinx/zynqmp/aarch64/plat_helpers.S
new file mode 100644
index 000000000..48155bbab
--- /dev/null
+++ b/plat/xilinx/zynqmp/aarch64/plat_helpers.S
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <drivers/console.h>
+
+ .globl platform_get_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+
+/*----------------------------------------------------------------------
+ * unsigned int platform_get_core_pos(unsigned long mpid)
+ *
+ * Function to calculate the core position.
+ * Return 0 to 3 as logical CPU ID.
+ */
+func platform_get_core_pos
+ lsr x1, x0, #MPIDR_AFF0_SHIFT
+ and x1, x1, #MPIDR_AFFLVL_MASK /* core id */
+ lsr x2, x0, #MPIDR_AFF1_SHIFT
+ and x2, x2, #MPIDR_AFFLVL_MASK /* cluster id */
+
+ /* core_id >= PLATFORM_CORES_PER_CLUSTER */
+ mov x0, #-1
+ cmp x1, #(PLATFORM_CORES_PER_CLUSTER - 1)
+ b.hi 1f
+
+ /* cluster_id >= PLATFORM_CLUSTER_COUNT */
+ cmp x2, #(PLATFORM_CLUSTER_COUNT - 1)
+ b.hi 1f
+
+ /* CorePos = CoreId + (ClusterId * cpus per cluster) */
+ mov x3, #PLATFORM_CORES_PER_CLUSTER
+ mul x3, x2, x3
+ add x0, x1, x3
+
+1:
+ ret
+endfunc platform_get_core_pos
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0 - x4
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, ZYNQMP_UART_BASE
+ mov_imm x1, ZYNQMP_CRASH_UART_CLK_IN_HZ
+ mov_imm x2, ZYNQMP_UART_BAUDRATE
+ b console_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, ZYNQMP_UART_BASE
+ b console_putc
+endfunc plat_crash_console_putc
+
+ /* ---------------------------------------------
+ * int plat_crash_console_flush()
+ * Function to force a write of all buffered
+ * data that hasn't been output.
+ * Out : return -1 on error else return 0.
+ * Clobber list : x0 - x1
+ * ---------------------------------------------
+ */
+func plat_crash_console_flush
+ mov_imm x1, ZYNQMP_UART_BASE
+ b console_flush
+endfunc plat_crash_console_flush
diff --git a/plat/xilinx/zynqmp/include/platform_def.h b/plat/xilinx/zynqmp/include/platform_def.h
new file mode 100644
index 000000000..cb3a70769
--- /dev/null
+++ b/plat/xilinx/zynqmp/include/platform_def.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <utils_def.h>
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+#define TFTF_BASE U(0x8000000)
+
+#define DRAM_BASE 0x0
+#define DRAM_SIZE 0x80000000
+
+#define PLATFORM_CLUSTER_COUNT U(1)
+#define PLATFORM_CORE_COUNT_PER_CLUSTER 4
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * \
+ PLATFORM_CORE_COUNT_PER_CLUSTER)
+#define PLATFORM_CORES_PER_CLUSTER PLATFORM_CORE_COUNT_PER_CLUSTER
+
+#define PLATFORM_NUM_AFFS (PLATFORM_CORE_COUNT + \
+ PLATFORM_CLUSTER_COUNT + 1)
+
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLAT_MAX_PWR_LEVEL MPIDR_AFFLVL2
+
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+#define PLATFORM_STACK_SIZE 0x440
+#define PCPU_DV_MEM_STACK_SIZE 0x100
+
+#define TFTF_NVM_SIZE 0x600000
+#define TFTF_NVM_OFFSET 0x20000000
+
+/* total number of system nodes implemented by the platform */
+#define PLATFORM_SYSTEM_COUNT U(1)
+
+/* UG1085 - system interrupts table */
+#define PLAT_MAX_SPI_OFFSET_ID 229
+
+/* Local state bit width for each level in the state-ID field of power state */
+#define PLAT_LOCAL_PSTATE_WIDTH 4
+
+#define PLAT_MAX_PWR_STATES_PER_LVL 2
+
+#define IRQ_PCPU_NS_TIMER 51
+
+#define IRQ_CNTPSIRQ1 80
+
+#define PLAT_SUSPEND_ENTRY_TIME 15
+#define PLAT_SUSPEND_ENTRY_EXIT_TIME 30
+
+#define IRQ_PCPU_HP_TIMER 26
+
+#define ZYNQMP_UART0_BASE 0xFF000000
+#define ZYNQMP_UART1_BASE 0xFF010000
+
+#define ZYNQMP_UART_BASE ZYNQMP_UART0_BASE
+#define CRASH_CONSOLE_SIZE 0x1000
+
+#define ZYNQMP_CRASH_UART_CLK_IN_HZ 100000000
+#define ZYNQMP_UART_BAUDRATE 115200
+
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/* Non-Secure Software Generated Interrupts IDs */
+
+#define IRQ_NS_SGI_0 0
+#define IRQ_NS_SGI_1 1
+#define IRQ_NS_SGI_2 2
+#define IRQ_NS_SGI_3 3
+#define IRQ_NS_SGI_4 4
+#define IRQ_NS_SGI_5 5
+#define IRQ_NS_SGI_6 6
+#define IRQ_NS_SGI_7 7
+
+/* Platform specific page table and MMU setup constants */
+
+#define PLAT_PHY_ADDR_SPACE_SIZE (ULL(1) << 32)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (ULL(1) << 32)
+
+/* Translation table constants */
+#define MAX_XLAT_TABLES 8
+#define MAX_MMAP_REGIONS 16
+
+/* ZYNQMP memory map related constants */
+
+/* Aggregate of all devices in the first GB */
+#define DEVICE0_BASE U(0xFF000000)
+#define DEVICE0_SIZE U(0x00E00000)
+#define DEVICE1_BASE U(0xF9000000)
+#define DEVICE1_SIZE U(0x00800000)
+
+/* GIC-400 & interrupt handling related constants */
+
+#define GIC_BASE DEVICE1_BASE
+#define GIC_SIZE 0x00080000
+#define BASE_GICD_BASE 0xF9010000
+#define BASE_GICC_BASE 0xF9020000
+#define BASE_GICH_BASE 0xF9040000
+#define BASE_GICV_BASE 0xF9060000
+
+#define TTC_BASE U(0xff140000)
+#define TTC_SIZE U(0x00010000)
+
+#define SYS_CNT_BASE1 TTC_BASE
+#define SYS_CNT_SIZE TTC_SIZE
+
+/* timer */
+#define LPD_IOU_SLCR U(0xff180000)
+#define LPD_IOU_SLCR_SIZE U(0x00010000)
+#define TTC_TIMER_IRQ U(77)
+#define TTC_CLK_SEL_OFFSET U(0x380)
+#define IRQ_TWDOG_INTID TTC_TIMER_IRQ
+#endif /* __PLATFORM_DEF_H__ */
diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk
new file mode 100644
index 000000000..201c2ee12
--- /dev/null
+++ b/plat/xilinx/zynqmp/platform.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+XLNX_COMMON_PATH := plat/xilinx/common
+ZYNQMP_PATH := plat/xilinx/zynqmp
+
+PLAT_INCLUDES += -Iplat/xilinx/zynqmp/include/
+
+PLAT_SOURCES := drivers/arm/gic/arm_gic_v2.c \
+ drivers/arm/gic/gic_common.c \
+ drivers/arm/gic/gic_v2.c \
+ drivers/arm/timer/private_timer.c \
+ drivers/cadence/uart/aarch64/cdns_console.S \
+ plat/xilinx/zynqmp/aarch64/plat_helpers.S \
+ plat/xilinx/zynqmp/zynqmp_pwr_state.c \
+ plat/xilinx/zynqmp/zynqmp_topology.c \
+ plat/xilinx/zynqmp/zynqmp_setup.c \
+ ${XLNX_COMMON_PATH}/timer/timers.c
+
+PLAT_TESTS_SKIP_LIST := plat/xilinx/zynqmp/tests_to_skip.txt
+
+TFTF_CFLAGS += -Wno-maybe-uninitialized -Wno-unused-variable
+
+ENABLE_ASSERTIONS := 1
+
+PLAT_SUPPORTS_NS_RESET := 1
+
+# Process PLAT_SUPPORTS_NS_RESET flag
+$(eval $(call assert_boolean,PLAT_SUPPORTS_NS_RESET))
+$(eval $(call add_define,TFTF_DEFINES,PLAT_SUPPORTS_NS_RESET))
+
+ifeq ($(USE_NVM),1)
+$(error "zynqmp port of TFTF doesn't currently support USE_NVM=1")
+endif
diff --git a/plat/xilinx/zynqmp/tests_to_skip.txt b/plat/xilinx/zynqmp/tests_to_skip.txt
new file mode 100644
index 000000000..9c32ae281
--- /dev/null
+++ b/plat/xilinx/zynqmp/tests_to_skip.txt
@@ -0,0 +1,65 @@
+#
+# Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+
+################################################################################
+# Disable the listed tests for zynqmp platform
+################################################################################
+#TESTS: tftf-validation
+Framework Validation/IRQ handling
+Framework Validation/Events API
+
+#TESTS: Timer framework Validation
+Timer framework Validation/Target timer to a power down cpu
+Timer framework Validation/Test scenario where multiple CPUs call same timeout
+Timer framework Validation/Stress test the timer framework
+
+#TESTS: Boot requirement tests
+Boot requirement tests
+
+#TESTS: CPU Hotplug/
+CPU Hotplug
+
+#TESTS: PSCI System Suspend Validation
+PSCI System Suspend Validation
+
+#TESTS: psci
+PSCI Affinity Info/Affinity info level0 powerdown
+PSCI CPU Suspend/CPU suspend to powerdown at level 0
+PSCI CPU Suspend/CPU suspend to powerdown at level 1
+PSCI CPU Suspend/CPU suspend to powerdown at level 2
+PSCI CPU Suspend/CPU suspend to standby at level 0
+PSCI CPU Suspend/CPU suspend to standby at level 1
+PSCI CPU Suspend/CPU suspend to standby at level 2
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 0 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 1 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 2 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to powerdown at level 3 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to standby at level 0 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to standby at level 1 in OSI mode
+PSCI CPU Suspend in OSI mode/CPU suspend to standby at level 2 in OSI mode
+PSCI System Suspend Validation/System suspend multiple times
+PSCI System Suspend Validation/system suspend from all cores
+PSCI System Suspend Validation/Validate suspend to RAM functionality
+
+#TESTS: psci stat
+PSCI STAT/Stats test cases for CPU OFF
+PSCI STAT/Stats test cases after system suspend
+
+#TESTS: el3-power-state
+EL3 power state parser validation
+
+#TESTS: SIMD
+SIMD,SVE Registers context/Check that SIMD registers context is preserved
+
+#TESTS: psci-extensive
+PSCI CPU ON OFF Stress Tests/Repeated shutdown of all cores to stress test CPU_ON, CPU_SUSPEND and CPU_OFF
+PSCI CPU ON OFF Stress Tests/PSCI CPU ON OFF stress test
+PSCI CPU ON OFF Stress Tests/Repeated hotplug of all cores to stress test CPU_ON and CPU_OFF
+PSCI CPU ON OFF Stress Tests/Random hotplug cores in a large iteration to stress boot path code
+
+#TESTS: SDEI
+SDEI
diff --git a/plat/xilinx/zynqmp/zynqmp_pwr_state.c b/plat/xilinx/zynqmp/zynqmp_pwr_state.c
new file mode 100644
index 000000000..4f3bc4b0c
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_pwr_state.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stddef.h>
+
+#include <arch.h>
+#include <platform.h>
+#include <psci.h>
+
+/*
+ * State IDs for local power states.
+ */
+#define ZYNQMP_RETENTION_STATE_ID 1 /* Valid for only CPUs */
+#define ZYNQMP_OFF_STATE_ID 0 /* Valid for CPUs and Clusters */
+
+/*
+ * Suspend depth definitions for each power state
+ */
+typedef enum {
+ ZYNQMP_RUN_DEPTH = 0,
+ ZYNQMP_RETENTION_DEPTH,
+ ZYNQMP_OFF_DEPTH,
+} suspend_depth_t;
+
+/* The state property array with details of idle state possible for the core */
+static const plat_state_prop_t core_state_prop[] = {
+ {ZYNQMP_RETENTION_DEPTH, ZYNQMP_RETENTION_STATE_ID, PSTATE_TYPE_STANDBY},
+ {ZYNQMP_OFF_DEPTH, ZYNQMP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the cluster */
+static const plat_state_prop_t cluster_state_prop[] = {
+ {ZYNQMP_OFF_DEPTH, ZYNQMP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+/* The state property array with details of idle state possible for the system level */
+static const plat_state_prop_t system_state_prop[] = {
+ {ZYNQMP_OFF_DEPTH, ZYNQMP_OFF_STATE_ID, PSTATE_TYPE_POWERDOWN},
+ {0},
+};
+
+const plat_state_prop_t *plat_get_state_prop(unsigned int level)
+{
+ switch (level) {
+ case MPIDR_AFFLVL0:
+ return core_state_prop;
+ case MPIDR_AFFLVL1:
+ return cluster_state_prop;
+ case MPIDR_AFFLVL2:
+ return system_state_prop;
+ default:
+ return NULL;
+ }
+}
diff --git a/plat/xilinx/zynqmp/zynqmp_setup.c b/plat/xilinx/zynqmp/zynqmp_setup.c
new file mode 100644
index 000000000..4a7d37119
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_setup.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <drivers/console.h>
+#include <drivers/arm/gic_common.h>
+#include <drivers/arm/gic_v2.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <xlat_tables_v2.h>
+#include <drivers/console.h>
+#include <debug.h>
+#include <drivers/arm/arm_gic.h>
+
+static const mmap_region_t zynqmp_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE + TFTF_NVM_OFFSET, TFTF_NVM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(GIC_BASE, GIC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(ZYNQMP_UART_BASE, CRASH_CONSOLE_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(TTC_BASE, TTC_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(LPD_IOU_SLCR, LPD_IOU_SLCR_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ {0}
+};
+
+const mmap_region_t *tftf_platform_get_mmap(void)
+{
+ return zynqmp_mmap;
+}
+
+void tftf_plat_arch_setup(void)
+{
+ tftf_plat_configure_mmu();
+}
+
+void tftf_early_platform_setup(void)
+{
+ console_init(ZYNQMP_UART_BASE, ZYNQMP_CRASH_UART_CLK_IN_HZ,
+ ZYNQMP_UART_BAUDRATE);
+}
+
+void plat_arm_gic_init(void)
+{
+ arm_gic_init(BASE_GICC_BASE, BASE_GICD_BASE, 0);
+}
+
+void tftf_platform_setup(void)
+{
+ plat_arm_gic_init();
+ arm_gic_setup_global();
+ arm_gic_setup_local();
+}
diff --git a/plat/xilinx/zynqmp/zynqmp_topology.c b/plat/xilinx/zynqmp/zynqmp_topology.c
new file mode 100644
index 000000000..f8262a8c3
--- /dev/null
+++ b/plat/xilinx/zynqmp/zynqmp_topology.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <assert.h>
+#include <mmio.h>
+#include <plat_topology.h>
+#include <platform_def.h>
+#include <stddef.h>
+#include <tftf_lib.h>
+
+static const struct {
+ unsigned int cluster_id;
+ unsigned int cpu_id;
+} zynqmp_cores[PLATFORM_CORE_COUNT] = {
+ { 0, 0 },
+ { 0, 1 },
+ { 0, 2 },
+ { 0, 3 }
+};
+
+static const unsigned char zynqmp_power_domain_tree_desc[] = {
+ /* Number of root nodes */
+ PLATFORM_SYSTEM_COUNT,
+ /* Number of children of root node */
+ PLATFORM_CLUSTER_COUNT,
+ /* Number of children for the cluster */
+ PLATFORM_CORES_PER_CLUSTER
+};
+
+const unsigned char *tftf_plat_get_pwr_domain_tree_desc(void)
+{
+ return zynqmp_power_domain_tree_desc;
+}
+
+uint64_t tftf_plat_get_mpidr(unsigned int core_pos)
+{
+ assert(core_pos < PLATFORM_CORE_COUNT);
+
+ return make_mpid(zynqmp_cores[core_pos].cluster_id,
+ zynqmp_cores[core_pos].cpu_id);
+}
diff --git a/readme.rst b/readme.rst
index 4fcd4921c..54724b898 100644
--- a/readme.rst
+++ b/readme.rst
@@ -60,7 +60,7 @@ To find out more about Trusted Firmware-A Tests, please
.. _Firmware update: https://trustedfirmware-a.readthedocs.io/en/latest/components/firmware-update.html
.. _EL3 payload: https://trustedfirmware-a.readthedocs.io/en/latest/design/alt-boot-flows.html#el3-payloads-alternative-boot-flow
-.. _Secure partition: https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager-design.html
+.. _Secure partition: https://trustedfirmware-a.readthedocs.io/en/latest/components/secure-partition-manager.html
.. _view the full documentation: https://trustedfirmware-a-tests.readthedocs.io/
.. _trustedfirmware.org: http://www.trustedfirmware.org
diff --git a/realm/aarch64/realm_entrypoint.S b/realm/aarch64/realm_entrypoint.S
new file mode 100644
index 000000000..0ea02dbd2
--- /dev/null
+++ b/realm/aarch64/realm_entrypoint.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <realm_def.h>
+
+ .globl realm_entrypoint
+
+.section .bss.stacks
+.align 16
+ .fill REALM_STACK_SIZE * MAX_REC_COUNT
+stacks_end:
+
+func realm_entrypoint
+ /* Save x0 - context_id */
+ mov x20, x0
+ mrs x0, mpidr_el1
+ mov_imm x1, MPID_MASK
+ and x0, x0, x1
+
+ /* Setup the stack pointer. */
+ bl realm_setup_my_stack
+
+ /* mpidr 0 is assumed to be primary CPU, jump to warmboot otherwise */
+ cbnz x0, realm_warmboot_endpoint
+
+ /* Primary CPU Only */
+ /* Clear BSS */
+ ldr x0, =__REALM_BSS_START__
+ adr x1, realm_entrypoint
+ add x0, x1, x0
+ ldr x1, =__REALM_BSS_SIZE__
+ bl zeromem16
+
+ /*
+ * Invalidate the data cache for the whole Realm.
+ * This prevents re-use of stale data cache entries from
+ * prior bootloader stages.
+ */
+ adrp x0, __REALM_TEXT_START__
+ add x0, x0, realm_entrypoint
+ adrp x1, __REALM_BSS_END__
+ add x1, x1, realm_entrypoint
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* Relocate symbols */
+pie_fixup:
+ ldr x0, =pie_fixup
+ and x0, x0, #~(PAGE_ALIGNMENT - 1)
+ mov x1, REALM_MAX_LOAD_IMG_SIZE
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+
+ /* Initialize architectural state. */
+ bl arch_init
+#if ENABLE_PAUTH
+ bl pauth_init_enable
+#endif
+
+loop:
+ /* And jump to the C entrypoint. */
+ bl realm_payload_main
+ b loop
+
+realm_warmboot_endpoint:
+ /* Initialize architectural state. */
+ bl arch_init
+#if ENABLE_PAUTH
+ bl pauth_init_enable
+#endif
+ mov x0, x20
+ b realm_secondary_entrypoint
+endfunc realm_entrypoint
+
+/*
+ * Setup the stack pointer.
+ * x0 = mpidr
+ * clobbers x1,x2
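+ * Each REC is given a REALM_STACK_SIZE slot in .bss.stacks; REC N's stack
+ * top ends up at stacks_end - (N * REALM_STACK_SIZE), as computed below.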
+ */
+func realm_setup_my_stack
+ adr x1, stacks_end
+ mov x2, REALM_STACK_SIZE
+ mul x2, x0, x2
+ sub sp, x1, x2
+ ret
+endfunc realm_setup_my_stack
+
+/* Initialize architectural state. */
+func arch_init
+ /* Set the exception vectors. */
+ adrp x0, realm_vector
+ add x0, x0, :lo12:realm_vector
+ msr vbar_el1, x0
+ isb
+
+ /* Enable the instruction cache and stack pointer alignment checks. */
+ mov_imm x0, (SCTLR_EL1_RES1 | SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+ msr sctlr_el1, x0
+
+ /*
+ * Set CPACR_EL1.FPEN/ZEN/SMEN to 0b11 so that FP/Adv. SIMD, SVE and
+ * SME instructions are not trapped at EL1/EL0.
+ */
+ mov x1, CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_NONE)
+ mrs x0, cpacr_el1
+ orr x0, x0, x1
+ mov x1, CPACR_EL1_ZEN(CPACR_EL1_ZEN_TRAP_NONE)
+ orr x0, x0, x1
+ mov x1, CPACR_EL1_SMEN(CPACR_EL1_SMEN_TRAP_NONE)
+ orr x0, x0, x1
+ msr cpacr_el1, x0
+ isb
+
+ ret
+endfunc arch_init
diff --git a/realm/aarch64/realm_exceptions.S b/realm/aarch64/realm_exceptions.S
new file mode 100644
index 000000000..210dd3e42
--- /dev/null
+++ b/realm/aarch64/realm_exceptions.S
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .globl realm_vector
+
+/*
+ * Exception vector code for unhandled exceptions.
+ * Prints a crash dump on the UART and loops forever.
+ */
+.macro unhandled_exception name
+ vector_entry \name
+ b crash_dump
+ end_vector_entry \name
+.endm
+
+vector_base realm_vector
+
+ /*
+ * Current EL with SP0 : 0x0 - 0x200.
+ */
+unhandled_exception sync_sp0
+unhandled_exception irq_sp0
+unhandled_exception fiq_sp0
+unhandled_exception serr_sp0
+
+ /*
+ * Current EL with SPx : 0x200 - 0x400.
+ */
+vector_entry sync_spx
+ b sync_exception_vector_entry
+end_vector_entry sync_spx
+
+vector_entry irq_spx
+ b interrupt_vector_entry
+end_vector_entry irq_spx
+
+vector_entry fiq_spx
+ b interrupt_vector_entry
+end_vector_entry fiq_spx
+
+unhandled_exception serr_spx
+
+ /*
+ * Lower EL using AArch64 : 0x400 - 0x600.
+ */
+unhandled_exception sync_a64
+unhandled_exception irq_a64
+unhandled_exception fiq_a64
+unhandled_exception serr_a64
+
+ /*
+ * Lower EL using AArch32 : 0x600 - 0x800.
+ */
+unhandled_exception sync_a32
+unhandled_exception irq_a32
+unhandled_exception fiq_a32
+unhandled_exception serr_a32
+
+.macro save_gp_regs
+ stp x0, x1, [sp, #0x0]
+ stp x2, x3, [sp, #0x10]
+ stp x4, x5, [sp, #0x20]
+ stp x6, x7, [sp, #0x30]
+ stp x8, x9, [sp, #0x40]
+ stp x10, x11, [sp, #0x50]
+ stp x12, x13, [sp, #0x60]
+ stp x14, x15, [sp, #0x70]
+ stp x16, x17, [sp, #0x80]
+ stp x18, x19, [sp, #0x90]
+ stp x20, x21, [sp, #0xa0]
+ stp x22, x23, [sp, #0xb0]
+ stp x24, x25, [sp, #0xc0]
+ stp x26, x27, [sp, #0xd0]
+ stp x28, x29, [sp, #0xe0]
+ /* We push xzr simply to keep the stack 16-byte aligned. */
+ stp x30, xzr, [sp, #0xf0]
+.endm
+
+.macro restore_gp_regs
+ ldp x30, xzr, [sp, #0xf0]
+ ldp x28, x29, [sp, #0xe0]
+ ldp x26, x27, [sp, #0xd0]
+ ldp x24, x25, [sp, #0xc0]
+ ldp x22, x23, [sp, #0xb0]
+ ldp x20, x21, [sp, #0xa0]
+ ldp x18, x19, [sp, #0x90]
+ ldp x16, x17, [sp, #0x80]
+ ldp x14, x15, [sp, #0x70]
+ ldp x12, x13, [sp, #0x60]
+ ldp x10, x11, [sp, #0x50]
+ ldp x8, x9, [sp, #0x40]
+ ldp x6, x7, [sp, #0x30]
+ ldp x4, x5, [sp, #0x20]
+ ldp x2, x3, [sp, #0x10]
+ ldp x0, x1, [sp, #0x0]
+.endm
+
+func sync_exception_vector_entry
+ sub sp, sp, #0x100
+ save_gp_regs
+ mov x19, sp
+ bl tftf_sync_exception_handler
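+ /*
+ * A non-zero return value means a registered handler dealt with the
+ * exception: restore the context and return. Otherwise fall through,
+ * stash the original SP and dump the saved context.
+ */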
+ cbnz x0, 0f
+ mov x0, x19
+ /* Save original stack pointer value on the stack */
+ add x1, x0, #0x100
+ str x1, [x0, #0xf8]
+ b realm_print_exception
+0: restore_gp_regs
+ add sp, sp, #0x100
+ eret
+endfunc sync_exception_vector_entry
+
+func interrupt_vector_entry
+ sub sp, sp, #0x100
+ save_gp_regs
+ bl realm_interrupt_handler
+ restore_gp_regs
+ add sp, sp, #0x100
+ eret
+endfunc interrupt_vector_entry
+
+func crash_dump
+ /* Save general-purpose registers on the stack. */
+ sub sp, sp, #0x100
+ save_gp_regs
+
+ /* Save original stack pointer value on the stack. */
+ add x1, sp, #0x100
+ str x1, [sp, #0xf8]
+
+ /* Print the saved CPU context on the UART. */
+ mov x0, sp
+ b realm_print_exception
+endfunc crash_dump
diff --git a/realm/include/realm_psci.h b/realm/include/realm_psci.h
new file mode 100644
index 000000000..ce41ffb53
--- /dev/null
+++ b/realm/include/realm_psci.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+
+void realm_cpu_off(void);
+u_register_t realm_cpu_on(u_register_t mpidr, uintptr_t entrypoint,
+ u_register_t context_id);
+u_register_t realm_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level);
+u_register_t realm_psci_features(uint32_t psci_func_id);
diff --git a/realm/include/realm_rsi.h b/realm/include/realm_rsi.h
new file mode 100644
index 000000000..8436db963
--- /dev/null
+++ b/realm/include/realm_rsi.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef REALM_RSI_H
+#define REALM_RSI_H
+
+#include <stdint.h>
+#include <host_shared_data.h>
+#include <tftf_lib.h>
+
+#define SMC_RSI_CALL_BASE 0xC4000190
+#define SMC_RSI_FID(_x) (SMC_RSI_CALL_BASE + (_x))
+/*
+ * This file describes the Realm Services Interface (RSI) Application Binary
+ * Interface (ABI) for SMC calls made from within the Realm to the RMM and
+ * serviced by the RMM.
+ *
+ * See doc/rmm_interface.md for more details.
+ */
+
+/*
+ * The major version number of the RSI implementation. Increase this whenever
+ * the binary format or semantics of the SMC calls change.
+ */
+#define RSI_ABI_VERSION_MAJOR 1U
+
+/*
+ * The minor version number of the RSI implementation. Increase this when
+ * a bug is fixed, or a feature is added without breaking binary compatibility.
+ */
+#define RSI_ABI_VERSION_MINOR 0U
+
+#define RSI_ABI_VERSION_VAL ((RSI_ABI_VERSION_MAJOR << 16U) | \
+ RSI_ABI_VERSION_MINOR)
+
+#define RSI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16U)
+#define RSI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFFU)
+
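+/*
+ * For illustration only: with the values above, RSI_ABI_VERSION_VAL is
+ * 0x10000, which the GET_MAJOR/GET_MINOR helpers decode back to 1 and 0.
+ */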
+
+/* RSI Status code enumeration as per Section D4.3.6 of the RMM Spec */
+typedef enum {
+ /* Command completed successfully */
+ RSI_SUCCESS = 0U,
+
+ /*
+ * The value of a command input value
+ * caused the command to fail
+ */
+ RSI_ERROR_INPUT = 1U,
+
+ /*
+ * The state of the current Realm or current REC
+ * does not match the state expected by the command
+ */
+ RSI_ERROR_STATE = 2U,
+
+ /* The operation requested by the command is not complete */
+ RSI_INCOMPLETE = 3U,
+
+ RSI_ERROR_COUNT
+} rsi_status_t;
+
+struct rsi_realm_config {
+ /* IPA width in bits */
+ SET_MEMBER(unsigned long ipa_width, 0, 0x1000); /* Offset 0 */
+};
+
+/*
+ * RSI_IPA_STATE_SET:
+ * arg0 == Base of target IPA region
+ * arg1 == Top of target IPA region
+ * arg2 == RIPAS value
+ * arg3 == Flags (RSI_CHANGE_DESTROYED / RSI_NO_CHANGE_DESTROYED)
+ * ret0 == Status / error
+ * ret1 == Top of modified IPA range
+ */
+
+#define RSI_HOST_CALL_NR_GPRS 31U
+
+struct rsi_host_call {
+ SET_MEMBER(struct {
+ /* Immediate value */
+ unsigned int imm; /* Offset 0 */
+ /* Registers */
+ unsigned long gprs[RSI_HOST_CALL_NR_GPRS];
+ }, 0, 0x100);
+};
+
+/*
+ * arg0 == struct rsi_host_call address
+ */
+#define RSI_HOST_CALL SMC_RSI_FID(9U)
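+/*
+ * Usage sketch (see realm_rsi.c): the Realm fills 'imm' with a host command,
+ * passes the address of a struct rsi_host_call as arg0 of RSI_HOST_CALL, and
+ * the Host may hand values back to the Realm through 'gprs'.
+ */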
+
+
+#define RSI_VERSION SMC_RSI_FID(0U)
+
+/*
+ * arg0 == struct rsi_realm_config address
+ */
+#define RSI_REALM_CONFIG SMC_RSI_FID(6U)
+#define RSI_IPA_STATE_SET SMC_RSI_FID(7U)
+#define RSI_IPA_STATE_GET SMC_RSI_FID(8U)
+
+typedef enum {
+ RSI_EMPTY = 0U,
+ RSI_RAM,
+ RSI_DESTROYED
+} rsi_ripas_type;
+
+typedef enum {
+ RSI_ACCEPT = 0U,
+ RSI_REJECT
+} rsi_ripas_respose_type;
+
+#define RSI_NO_CHANGE_DESTROYED 0UL
+#define RSI_CHANGE_DESTROYED 1UL
+
+/* Request RIPAS of a target IPA range to be changed to a specified value. */
+u_register_t rsi_ipa_state_set(u_register_t base,
+ u_register_t top,
+ rsi_ripas_type ripas,
+ u_register_t flag,
+ u_register_t *new_base,
+ rsi_ripas_respose_type *response);
+
+/* Request RIPAS of a target IPA */
+u_register_t rsi_ipa_state_get(u_register_t adr, rsi_ripas_type *ripas);
+
+/* This function requests the RSI ABI version from the RMM */
+u_register_t rsi_get_version(u_register_t req_ver);
+
+/* This function will call the Host to request IPA of the NS shared buffer */
+u_register_t rsi_get_ns_buffer(void);
+
+/* This function calls the Host and requests to exit the Realm with the given exit code */
+void rsi_exit_to_host(enum host_call_cmd exit_code);
+
+#endif /* REALM_RSI_H */
diff --git a/realm/include/realm_tests.h b/realm/include/realm_tests.h
new file mode 100644
index 000000000..b58949bd0
--- /dev/null
+++ b/realm/include/realm_tests.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#ifndef REALM_TESTS_H
+#define REALM_TESTS_H
+
+bool test_pmuv3_cycle_works_realm(void);
+bool test_pmuv3_counter(void);
+bool test_pmuv3_event_works_realm(void);
+bool test_pmuv3_rmm_preserves(void);
+bool test_pmuv3_overflow_interrupt(void);
+bool test_realm_pauth_set_cmd(void);
+bool test_realm_pauth_check_cmd(void);
+bool test_realm_pauth_fault(void);
+bool test_realm_sve_rdvl(void);
+bool test_realm_sve_read_id_registers(void);
+bool test_realm_sve_probe_vl(void);
+bool test_realm_sve_ops(void);
+bool test_realm_sve_fill_regs(void);
+bool test_realm_sve_cmp_regs(void);
+bool test_realm_sve_undef_abort(void);
+bool test_realm_multiple_rec_psci_denied_cmd(void);
+bool test_realm_multiple_rec_multiple_cpu_cmd(void);
+bool test_realm_sme_read_id_registers(void);
+bool test_realm_sme_undef_abort(void);
+
+#endif /* REALM_TESTS_H */
+
diff --git a/realm/realm.ld.S b/realm/realm.ld.S
new file mode 100644
index 000000000..ca3b809b3
--- /dev/null
+++ b/realm/realm.ld.S
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+ENTRY(realm_entrypoint)
+
+#include <realm_def.h>
+
+MEMORY {
+
+ RAM (rwx): ORIGIN = 0x0, LENGTH = REALM_MAX_LOAD_IMG_SIZE
+}
+
+SECTIONS
+{
+ ASSERT(. == ALIGN(PAGE_SIZE),
+ "TEXT_START address is not aligned to PAGE_SIZE.")
+ .text : {
+ __REALM_TEXT_START__ = .;
+ *realm_entrypoint.o(.text*)
+ *(.text*)
+ *(.vectors)
+ . = NEXT(PAGE_SIZE);
+ __REALM_TEXT_END__ = .;
+ }> RAM
+
+ .rodata : {
+ . = ALIGN(PAGE_SIZE);
+ __REALM_RODATA_START__ = .;
+ *(.rodata*)
+
+ /*
+ * Keep the .got section in the RO section as it is patched
+ * prior to enabling the MMU and having the .got in RO is better for
+ * security. GOT is a table of addresses so ensure 8-byte alignment.
+ */
+ . = ALIGN(8);
+ __GOT_START__ = .;
+ *(.got)
+ __GOT_END__ = .;
+
+ . = NEXT(PAGE_SIZE);
+ __REALM_RODATA_END__ = .;
+
+ }> RAM
+
+ .data : {
+ . = ALIGN(PAGE_SIZE);
+ __REALM_DATA_START__ = .;
+ *(.data*)
+ . = ALIGN(PAGE_SIZE);
+ . = NEXT(PAGE_SIZE);
+ __REALM_DATA_END__ = .;
+ }> RAM
+
+ /*
+ * .rela.dyn needs to come after .data for the read-elf utility to parse
+ * this section correctly. Ensure 8-byte alignment so that the fields of
+ * RELA data structure are aligned.
+ */
+ . = ALIGN(8);
+ __RELA_START__ = .;
+ .rela.dyn . : {
+ }> RAM
+ __RELA_END__ = .;
+
+ .bss (NOLOAD) : {
+ . = ALIGN(PAGE_SIZE);
+ __REALM_BSS_START__ = .;
+ *(SORT_BY_ALIGNMENT(.bss*))
+ *(COMMON)
+ . = NEXT(PAGE_SIZE);
+ __REALM_BSS_END__ = .;
+ }> RAM
+ __REALM_BSS_SIZE__ = SIZEOF(.bss);
+}
diff --git a/realm/realm.mk b/realm/realm.mk
new file mode 100644
index 000000000..305c00706
--- /dev/null
+++ b/realm/realm.mk
@@ -0,0 +1,69 @@
+#
+# Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+include branch_protection.mk
+
+REALM_INCLUDES := \
+ -Itftf/framework/include \
+ -Iinclude \
+ -Iinclude/common \
+ -Iinclude/common/${ARCH} \
+ -Iinclude/lib \
+ -Iinclude/lib/extensions \
+ -Iinclude/lib/${ARCH} \
+ -Iinclude/lib/utils \
+ -Iinclude/lib/xlat_tables \
+ -Iinclude/runtime_services \
+ -Iinclude/runtime_services/host_realm_managment \
+ -Irealm \
+ -Irealm/aarch64 \
+ -Irealm/include
+
+REALM_SOURCES:= \
+ $(addprefix realm/, \
+ aarch64/realm_entrypoint.S \
+ aarch64/realm_exceptions.S \
+ realm_exception_report.c \
+ realm_debug.c \
+ realm_interrupt.c \
+ realm_multiple_rec.c \
+ realm_pauth.c \
+ realm_payload_main.c \
+ realm_pmuv3.c \
+ realm_psci.c \
+ realm_rsi.c \
+ realm_shared_data.c \
+ realm_simd.c \
+ )
+
+REALM_SOURCES += lib/${ARCH}/cache_helpers.S \
+ lib/${ARCH}/misc_helpers.S \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ lib/exceptions/${ARCH}/sync.c \
+ lib/locks/${ARCH}/spinlock.S \
+ lib/delay/delay.c \
+ lib/extensions/fpu/fpu.c \
+ lib/extensions/sve/aarch64/sve.c \
+ lib/extensions/sve/aarch64/sve_helpers.S \
+ lib/extensions/sme/aarch64/sme.c \
+ lib/extensions/sme/aarch64/sme_helpers.S
+
+REALM_LINKERFILE:= realm/realm.ld.S
+
+# ARMv8.3 Pointer Authentication support files
+REALM_SOURCES += lib/extensions/pauth/aarch64/pauth.c \
+ lib/extensions/pauth/aarch64/pauth_helpers.S
+
+REALM_INCLUDES += -Iinclude/lib/extensions
+
+REALM_DEFINES:=
+$(eval $(call add_define,REALM_DEFINES,ARM_ARCH_MAJOR))
+$(eval $(call add_define,REALM_DEFINES,ARM_ARCH_MINOR))
+$(eval $(call add_define,REALM_DEFINES,ENABLE_BTI))
+$(eval $(call add_define,REALM_DEFINES,ENABLE_PAUTH))
+$(eval $(call add_define,REALM_DEFINES,LOG_LEVEL))
+$(eval $(call add_define,REALM_DEFINES,IMAGE_REALM))
diff --git a/realm/realm_debug.c b/realm/realm_debug.c
new file mode 100644
index 000000000..1c4ee0370
--- /dev/null
+++ b/realm/realm_debug.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <arch_helpers.h>
+#include <host_shared_data.h>
+#include <realm_rsi.h>
+
+/*
+ * A printf-style function used in the Realm world to log messages.
+ * It appends the formatted message to the shared logging buffer and then
+ * asks the Host to print its contents.
+ */
+void realm_printf(const char *fmt, ...)
+{
+ host_shared_data_t *guest_shared_data = realm_get_my_shared_structure();
+ char *log_buffer = (char *)guest_shared_data->log_buffer;
+ va_list args;
+
+ va_start(args, fmt);
+ if (strnlen((const char *)log_buffer, MAX_BUF_SIZE) == MAX_BUF_SIZE) {
+ (void)memset((char *)log_buffer, 0, MAX_BUF_SIZE);
+ }
+ (void)vsnprintf((char *)log_buffer +
+ strnlen((const char *)log_buffer, MAX_BUF_SIZE),
+ MAX_BUF_SIZE -
+ strnlen((const char *)log_buffer, MAX_BUF_SIZE),
+ fmt, args);
+ va_end(args);
+ rsi_exit_to_host(HOST_CALL_EXIT_PRINT_CMD);
+}
+
+void __attribute__((__noreturn__)) do_panic(const char *file, int line)
+{
+ realm_printf("PANIC in file: %s line: %d\n", file, line);
+ while (true) {
+ continue;
+ }
+}
+
+/* This is used by printf() when a crash dump is reached */
+int console_putc(int c)
+{
+ host_shared_data_t *guest_shared_data = realm_get_my_shared_structure();
+ char *log_buffer = (char *)guest_shared_data->log_buffer;
+
+ if ((c < 0) || (c > 127)) {
+ return -1;
+ }
+ if (strnlen((const char *)log_buffer, MAX_BUF_SIZE) == MAX_BUF_SIZE) {
+ (void)memset((char *)log_buffer, 0, MAX_BUF_SIZE);
+ }
+ *((char *)log_buffer + strnlen((const char *)log_buffer, MAX_BUF_SIZE)) = c;
+
+ return c;
+}
diff --git a/realm/realm_exception_report.c b/realm/realm_exception_report.c
new file mode 100644
index 000000000..b0297d7bf
--- /dev/null
+++ b/realm/realm_exception_report.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+
+/* We save x0-x30. */
+#define GPRS_CNT 31U
+
+/* Set of registers saved by the crash_dump() assembly function in stack. */
+struct rec_regs {
+ unsigned long gprs[GPRS_CNT];
+ unsigned long sp;
+};
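+
+/*
+ * The layout must match the 0x100-byte frame built in realm_exceptions.S:
+ * x0-x30 at offsets 0x0-0xf0 and the original SP stored at offset 0xf8.
+ */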
+
+void __dead2 realm_print_exception(const struct rec_regs *ctx)
+{
+ u_register_t mpid;
+
+ /*
+ * The instruction barrier ensures we don't read stale values of system
+ * registers.
+ */
+ isb();
+
+ mpid = read_mpidr_el1();
+ realm_printf("Unhandled exception on REC%u.\n", mpid & MPID_MASK);
+
+ /* Dump some interesting system registers. */
+ realm_printf("System registers:\n");
+ realm_printf(" MPIDR=0x%lx\n", mpid);
+ realm_printf(" ESR=0x%lx ELR=0x%lx FAR=0x%lx\n", read_esr_el1(),
+ read_elr_el1(), read_far_el1());
+ realm_printf(" SCTLR=0x%lx SPSR=0x%lx DAIF=0x%lx\n",
+ read_sctlr_el1(), read_spsr_el1(), read_daif());
+
+ /* Dump general-purpose registers. */
+ realm_printf("General-purpose registers:\n");
+ for (unsigned int i = 0U; i < GPRS_CNT; i++) {
+ realm_printf(" x%u=0x%lx\n", i, ctx->gprs[i]);
+ }
+ realm_printf(" SP=0x%lx\n", ctx->sp);
+
+ while (1) {
+ wfi();
+ }
+}
diff --git a/realm/realm_interrupt.c b/realm/realm_interrupt.c
new file mode 100644
index 000000000..7f8dc152f
--- /dev/null
+++ b/realm/realm_interrupt.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <host_realm_pmu.h>
+
+/* Realm interrupt handler */
+void realm_interrupt_handler(void)
+{
+ /* Read INTID and acknowledge interrupt */
+ unsigned long iar1_el1 = read_icv_iar1_el1();
+
+ /* Deactivate interrupt */
+ write_icv_eoir1_el1(iar1_el1);
+
+ /* Clear PMU interrupt */
+ if (iar1_el1 == PMU_VIRQ) {
+ write_pmintenclr_el1(read_pmintenset_el1());
+ isb();
+ } else {
+ panic();
+ }
+}
diff --git a/realm/realm_multiple_rec.c b/realm/realm_multiple_rec.c
new file mode 100644
index 000000000..c584cd4c0
--- /dev/null
+++ b/realm/realm_multiple_rec.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdio.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <fpu.h>
+#include <host_realm_helper.h>
+#include <host_shared_data.h>
+#include <psci.h>
+#include "realm_def.h"
+#include <realm_rsi.h>
+#include <realm_tests.h>
+#include <realm_psci.h>
+#include <tftf_lib.h>
+
+#define CXT_ID_MAGIC 0x100
+static uint64_t is_secondary_cpu_booted;
+static spinlock_t lock;
+
+static void rec1_handler(u_register_t cxt_id)
+{
+ realm_printf("running on CPU = 0x%lx cxt_id= 0x%lx\n",
+ read_mpidr_el1() & MPID_MASK, cxt_id);
+ if (cxt_id < CXT_ID_MAGIC || cxt_id > CXT_ID_MAGIC + MAX_REC_COUNT) {
+ realm_printf("Wrong cxt_id\n");
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+ }
+ spin_lock(&lock);
+ is_secondary_cpu_booted++;
+ spin_unlock(&lock);
+ realm_cpu_off();
+}
+
+static void rec2_handler(u_register_t cxt_id)
+{
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+}
+
+bool test_realm_multiple_rec_psci_denied_cmd(void)
+{
+ u_register_t ret;
+
+ is_secondary_cpu_booted = 0U;
+ ret = realm_cpu_on(1U, (uintptr_t)rec1_handler, 0x100);
+ if (ret != PSCI_E_DENIED) {
+ return false;
+ }
+
+ if (is_secondary_cpu_booted != 0U) {
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+ }
+
+ ret = realm_psci_affinity_info(1U, MPIDR_AFFLVL0);
+ if (ret != PSCI_STATE_OFF) {
+ realm_printf("CPU 1 should have been off\n");
+ return false;
+ }
+
+ ret = realm_cpu_on(2U, (uintptr_t)rec2_handler, 0x102);
+ if (ret != PSCI_E_ALREADY_ON) {
+ realm_printf("CPU 2 should have been already on\n");
+ return false;
+ }
+ return true;
+}
+
+bool test_realm_multiple_rec_multiple_cpu_cmd(void)
+{
+ unsigned int i = 1U, rec_count;
+ u_register_t ret;
+
+ realm_printf("Realm: running on CPU = 0x%lx\n", read_mpidr_el1() & MPID_MASK);
+ rec_count = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+
+ /* Check CPU_ON is supported */
+ ret = realm_psci_features(SMC_PSCI_CPU_ON);
+ if (ret != PSCI_E_SUCCESS) {
+ realm_printf("SMC_PSCI_CPU_ON not supported\n");
+ return false;
+ }
+
+ for (unsigned int j = 1U; j < rec_count; j++) {
+ ret = realm_cpu_on(j, (uintptr_t)rec1_handler, CXT_ID_MAGIC + j);
+ if (ret != PSCI_E_SUCCESS) {
+ realm_printf("SMC_PSCI_CPU_ON failed %d.\n", j);
+ return false;
+ }
+ }
+
+ /* Exit to host to allow host to run all CPUs */
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+ /* wait for all CPUs to come up */
+ while (is_secondary_cpu_booted != rec_count - 1U) {
+ waitms(200);
+ }
+
+ /* wait for all CPUs to turn off */
+ while (i < rec_count) {
+ ret = realm_psci_affinity_info(i, MPIDR_AFFLVL0);
+ if (ret != PSCI_STATE_OFF) {
+ /* wait and query again */
+ realm_printf(" CPU %d is not off\n", i);
+ waitms(200);
+ continue;
+ }
+ i++;
+ }
+ realm_printf("All CPU are off\n");
+ return true;
+}
diff --git a/realm/realm_pauth.c b/realm/realm_pauth.c
new file mode 100644
index 000000000..31b26e717
--- /dev/null
+++ b/realm/realm_pauth.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdio.h>
+#include <arch_features.h>
+#include <assert.h>
+#include <debug.h>
+#include <pauth.h>
+#include <realm_rsi.h>
+#include <sync.h>
+
+static volatile bool set_cmd_done[MAX_REC_COUNT];
+static uint128_t pauth_keys_before[MAX_REC_COUNT][NUM_KEYS];
+static uint128_t pauth_keys_after[MAX_REC_COUNT][NUM_KEYS];
+
+static bool exception_handler(void)
+{
+ u_register_t lr = read_elr_el1();
+
+ /* Disable PAuth to avoid further PAuth faults. */
+ pauth_disable();
+
+ /* Check for PAuth exception. */
+ /* Note- PAuth decode instruction clobbers PAC Fields[63:56] in case of error. */
+ if (lr & (0xFFULL << 56U)) {
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+ }
+
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+
+ /* Does not return. */
+ return false;
+}
+
+void dummy_func(void)
+{
+ realm_printf("shouldn't reach here.\n");
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+}
+
+bool test_realm_pauth_fault(void)
+{
+ u_register_t ptr = (u_register_t)dummy_func;
+
+ if (!is_armv8_3_pauth_present()) {
+ return false;
+ }
+
+ register_custom_sync_exception_handler(exception_handler);
+ realm_printf("overwrite LR to generate fault.\n");
+ __asm__("mov x17, x30; "
+ "mov x30, %0; " /* overwrite LR. */
+ "isb; "
+ "autiasp; "
+ "ret; " /* fault on return. */
+ :
+ : "r"(ptr));
+
+ /* Does not return. */
+ return false;
+}
+
+/*
+ * TF-A is expected to allow access to the PAuth key registers from lower
+ * ELs. Reading the keys exercises this; on failure the access traps to
+ * EL3 and crashes.
+ */
+bool test_realm_pauth_set_cmd(void)
+{
+ unsigned int rec = read_mpidr_el1() & MPID_MASK;
+
+ if (!is_armv8_3_pauth_present()) {
+ return false;
+ }
+ assert(rec < MAX_REC_COUNT);
+ pauth_test_lib_test_intrs();
+ pauth_test_lib_fill_regs_and_template(pauth_keys_before[rec]);
+ set_cmd_done[rec] = true;
+ return true;
+}
+
+bool test_realm_pauth_check_cmd(void)
+{
+ unsigned int rec = read_mpidr_el1() & MPID_MASK;
+ bool ret;
+
+ assert(rec < MAX_REC_COUNT);
+ if (!is_armv8_3_pauth_present() || !set_cmd_done[rec]) {
+ return false;
+ }
+ ret = pauth_test_lib_compare_template(pauth_keys_before[rec], pauth_keys_after[rec]);
+ realm_printf("Pauth key comparison ret=%d\n", ret);
+ return ret;
+}
diff --git a/realm/realm_payload_main.c b/realm/realm_payload_main.c
new file mode 100644
index 000000000..5eec5b820
--- /dev/null
+++ b/realm/realm_payload_main.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdio.h>
+
+#include <arch_features.h>
+#include <debug.h>
+#include <fpu.h>
+#include <host_realm_helper.h>
+#include <host_shared_data.h>
+#include <pauth.h>
+#include "realm_def.h"
+#include <realm_rsi.h>
+#include <realm_tests.h>
+#include <sync.h>
+#include <tftf_lib.h>
+
+static fpu_state_t rl_fpu_state_write;
+static fpu_state_t rl_fpu_state_read;
+/*
+ * This function reads the sleep time in ms from the shared buffer and spins
+ * the PE in a loop for that period.
+ */
+static void realm_sleep_cmd(void)
+{
+ uint64_t sleep = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+
+ realm_printf("going to sleep for %llums\n", sleep);
+ waitms(sleep);
+}
+
+static void realm_loop_cmd(void)
+{
+ while (true) {
+ waitms(500);
+ }
+}
+
+/*
+ * This function requests RSI/ABI version from RMM.
+ */
+static bool realm_get_rsi_version(void)
+{
+ u_register_t version = 0U;
+
+ version = rsi_get_version(RSI_ABI_VERSION_VAL);
+ if (version == (u_register_t)SMC_UNKNOWN) {
+ realm_printf("SMC_RSI_ABI_VERSION failed\n");
+ return false;
+ }
+
+ realm_printf("RSI ABI version %u.%u (expected: %u.%u)\n",
+ RSI_ABI_VERSION_GET_MAJOR(version),
+ RSI_ABI_VERSION_GET_MINOR(version),
+ RSI_ABI_VERSION_GET_MAJOR(RSI_ABI_VERSION_VAL),
+ RSI_ABI_VERSION_GET_MINOR(RSI_ABI_VERSION_VAL));
+ return true;
+}
+
+bool test_realm_set_ripas(void)
+{
+ u_register_t ret, base, new_base, top;
+ rsi_ripas_respose_type response;
+ rsi_ripas_type ripas;
+
+ base = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ top = realm_shared_data_get_my_host_val(HOST_ARG2_INDEX);
+ realm_printf("base=0x%lx top =0x%lx\n", base, top);
+ ret = rsi_ipa_state_get(base, &ripas);
+ if (ripas != RSI_EMPTY) {
+ return false;
+ }
+
+ ret = rsi_ipa_state_set(base, top, RSI_RAM,
+ RSI_NO_CHANGE_DESTROYED, &new_base, &response);
+ if (ret != RSI_SUCCESS || response != RSI_ACCEPT) {
+ return false;
+ }
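+ /*
+ * The RMM may change only part of the range per call and return the
+ * next base to process in new_base, so keep re-issuing the request
+ * until the whole range up to 'top' has been covered.
+ */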
+ while (new_base < top) {
+ realm_printf("new_base=0x%lx top =0x%lx\n", new_base, top);
+ ret = rsi_ipa_state_set(new_base, top, RSI_RAM,
+ RSI_NO_CHANGE_DESTROYED, &new_base, &response);
+ if (ret != RSI_SUCCESS || response != RSI_ACCEPT) {
+ realm_printf("rsi_ipa_state_set failed\n");
+ return false;
+ }
+ }
+
+ /* Verify that the RIPAS has changed for the range [base, top). */
+ for (unsigned int i = 0U; (base + (PAGE_SIZE * i) < top); i++) {
+ ret = rsi_ipa_state_get(base + (PAGE_SIZE * i), &ripas);
+ if (ret != RSI_SUCCESS || ripas != RSI_RAM) {
+ realm_printf("rsi_ipa_state_get failed base=0x%lx, ripas=0x%x\n",
+ base + (PAGE_SIZE * i), ripas);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool test_realm_reject_set_ripas(void)
+{
+ u_register_t ret, base, new_base;
+ rsi_ripas_respose_type response;
+ rsi_ripas_type ripas;
+
+ base = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ ret = rsi_ipa_state_get(base, &ripas);
+ if (ret != RSI_SUCCESS || ripas != RSI_EMPTY) {
+ realm_printf("Wrong initial ripas=0x%lx\n", ripas);
+ return false;
+ }
+ ret = rsi_ipa_state_set(base, base + PAGE_SIZE, RSI_RAM,
+ RSI_NO_CHANGE_DESTROYED, &new_base, &response);
+ if (ret == RSI_SUCCESS && response == RSI_REJECT) {
+ realm_printf("rsi_ipa_state_set passed response = %d\n", response);
+ ret = rsi_ipa_state_get(base, &ripas);
+ if (ret == RSI_SUCCESS && ripas == RSI_EMPTY) {
+ return true;
+ } else {
+ realm_printf("rsi_ipa_state_get failed ripas = %d\n", ripas);
+ return false;
+ }
+ }
+ realm_printf("rsi_ipa_state_set failed ret=0x%lx, response = %d\n", ret, response);
+ return false;
+}
+
+bool test_realm_dit_check_cmd(void)
+{
+ if (is_armv8_4_dit_present()) {
+ write_dit(DIT_BIT);
+ realm_printf("Testing DIT=0x%lx\n", read_dit());
+ /* Test if DIT is preserved after HOST_CALL */
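+ /*
+ * realm_printf() above exits to the Host via a HOST_CALL, so this
+ * read checks that the DIT bit survived the world switch.
+ */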
+ if (read_dit() == DIT_BIT) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+static bool test_realm_instr_fetch_cmd(void)
+{
+ u_register_t base;
+ void (*func_ptr)(void);
+ rsi_ripas_type ripas;
+
+ base = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ rsi_ipa_state_get(base, &ripas);
+ realm_printf("Initial ripas=0x%lx\n", ripas);
+ /* causes instruction abort */
+ realm_printf("Generate Instruction Abort\n");
+ func_ptr = (void (*)(void))base;
+ func_ptr();
+ /* should not return */
+ return false;
+}
+
+static bool test_realm_data_access_cmd(void)
+{
+ u_register_t base;
+ rsi_ripas_type ripas;
+
+ base = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ rsi_ipa_state_get(base, &ripas);
+ realm_printf("Initial ripas=0x%lx\n", ripas);
+ /* causes data abort */
+ realm_printf("Generate Data Abort\n");
+ *((volatile uint64_t *)base);
+ /* should not return */
+ return false;
+}
+
+static bool realm_exception_handler(void)
+{
+ u_register_t base, far, esr;
+
+ base = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ far = read_far_el1();
+ esr = read_esr_el1();
+
+ if (far == base) {
+ /* return ESR to Host */
+ realm_shared_data_set_my_realm_val(HOST_ARG2_INDEX, esr);
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+ }
+ realm_printf("Realm abort failed: incorrect FAR=0x%lx ESR=0x%lx\n", far, esr);
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+
+ /* Should not return. */
+ return false;
+}
+
+/*
+ * This is the entry function for the Realm payload. It first requests the
+ * shared buffer IPA address from the Host using HOST_CALL/RSI, reads the
+ * command to be executed, performs the request, and returns to the Host
+ * with the execution state SUCCESS/FAILED.
+ *
+ * The Host in the NS world uses these commands to request the Realm to
+ * execute specific operations, depending on the test case it wants to run.
+ */
+void realm_payload_main(void)
+{
+ bool test_succeed = false;
+
+ register_custom_sync_exception_handler(realm_exception_handler);
+ realm_set_shared_structure((host_shared_data_t *)rsi_get_ns_buffer());
+ if (realm_get_my_shared_structure() != NULL) {
+ uint8_t cmd = realm_shared_data_get_my_realm_cmd();
+
+ switch (cmd) {
+ case REALM_SLEEP_CMD:
+ realm_sleep_cmd();
+ test_succeed = true;
+ break;
+ case REALM_LOOP_CMD:
+ realm_loop_cmd();
+ test_succeed = true;
+ break;
+ case REALM_MULTIPLE_REC_PSCI_DENIED_CMD:
+ test_succeed = test_realm_multiple_rec_psci_denied_cmd();
+ break;
+ case REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD:
+ test_succeed = test_realm_multiple_rec_multiple_cpu_cmd();
+ break;
+ case REALM_INSTR_FETCH_CMD:
+ test_succeed = test_realm_instr_fetch_cmd();
+ break;
+ case REALM_DATA_ACCESS_CMD:
+ test_succeed = test_realm_data_access_cmd();
+ break;
+ case REALM_PAUTH_SET_CMD:
+ test_succeed = test_realm_pauth_set_cmd();
+ break;
+ case REALM_PAUTH_CHECK_CMD:
+ test_succeed = test_realm_pauth_check_cmd();
+ break;
+ case REALM_PAUTH_FAULT:
+ test_succeed = test_realm_pauth_fault();
+ break;
+ case REALM_DIT_CHECK_CMD:
+ test_succeed = test_realm_dit_check_cmd();
+ break;
+ case REALM_GET_RSI_VERSION:
+ test_succeed = realm_get_rsi_version();
+ break;
+ case REALM_PMU_CYCLE:
+ test_succeed = test_pmuv3_cycle_works_realm();
+ break;
+ case REALM_PMU_COUNTER:
+ test_succeed = test_pmuv3_counter();
+ break;
+ case REALM_PMU_EVENT:
+ test_succeed = test_pmuv3_event_works_realm();
+ break;
+ case REALM_PMU_PRESERVE:
+ test_succeed = test_pmuv3_rmm_preserves();
+ break;
+ case REALM_PMU_INTERRUPT:
+ test_succeed = test_pmuv3_overflow_interrupt();
+ break;
+ case REALM_REQ_FPU_FILL_CMD:
+ fpu_state_write_rand(&rl_fpu_state_write);
+ test_succeed = true;
+ break;
+ case REALM_REQ_FPU_CMP_CMD:
+ fpu_state_read(&rl_fpu_state_read);
+ test_succeed = !fpu_state_compare(&rl_fpu_state_write,
+ &rl_fpu_state_read);
+ break;
+ case REALM_REJECT_SET_RIPAS_CMD:
+ test_succeed = test_realm_reject_set_ripas();
+ break;
+ case REALM_SET_RIPAS_CMD:
+ test_succeed = test_realm_set_ripas();
+ break;
+ case REALM_SVE_RDVL:
+ test_succeed = test_realm_sve_rdvl();
+ break;
+ case REALM_SVE_ID_REGISTERS:
+ test_succeed = test_realm_sve_read_id_registers();
+ break;
+ case REALM_SVE_PROBE_VL:
+ test_succeed = test_realm_sve_probe_vl();
+ break;
+ case REALM_SVE_OPS:
+ test_succeed = test_realm_sve_ops();
+ break;
+ case REALM_SVE_FILL_REGS:
+ test_succeed = test_realm_sve_fill_regs();
+ break;
+ case REALM_SVE_CMP_REGS:
+ test_succeed = test_realm_sve_cmp_regs();
+ break;
+ case REALM_SVE_UNDEF_ABORT:
+ test_succeed = test_realm_sve_undef_abort();
+ break;
+ case REALM_SME_ID_REGISTERS:
+ test_succeed = test_realm_sme_read_id_registers();
+ break;
+ case REALM_SME_UNDEF_ABORT:
+ test_succeed = test_realm_sme_undef_abort();
+ break;
+ default:
+ realm_printf("%s() invalid cmd %u\n", __func__, cmd);
+ break;
+ }
+ }
+
+ if (test_succeed) {
+ rsi_exit_to_host(HOST_CALL_EXIT_SUCCESS_CMD);
+ } else {
+ rsi_exit_to_host(HOST_CALL_EXIT_FAILED_CMD);
+ }
+}
diff --git a/realm/realm_pmuv3.c b/realm/realm_pmuv3.c
new file mode 100644
index 000000000..0d4782a3b
--- /dev/null
+++ b/realm/realm_pmuv3.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <drivers/arm/gic_v3.h>
+
+#include <host_realm_pmu.h>
+#include <realm_rsi.h>
+
+/* PMUv3 events */
+#define PMU_EVT_SW_INCR 0x0
+#define PMU_EVT_INST_RETIRED 0x8
+#define PMU_EVT_CPU_CYCLES 0x11
+#define PMU_EVT_MEM_ACCESS 0x13
+
+#define NOP_REPETITIONS 50
+#define MAX_COUNTERS 32
+
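+/*
+ * Event counter start value just below the overflow point, so that a handful
+ * of counted events raises the overflow interrupt (see
+ * test_pmuv3_overflow_interrupt()).
+ */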
+#define PRE_OVERFLOW ~(0xF)
+
+#define DELAY_MS 3000ULL
+
+static inline void read_all_counters(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccntr_el0();
+ for (unsigned int i = 0U; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevcntrn_el0(i);
+ }
+}
+
+static inline void read_all_counter_configs(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccfiltr_el0();
+ for (unsigned int i = 0U; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevtypern_el0(i);
+ }
+}
+
+static inline void read_all_pmu_configs(u_register_t *array)
+{
+ array[0] = read_pmcntenset_el0();
+ array[1] = read_pmcr_el0();
+ array[2] = read_pmselr_el0();
+}
+
+static inline void enable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
+ /* This function means we are about to use the PMU, synchronize */
+ isb();
+}
+
+static inline void disable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
+ /* We also rely that disabling really did work */
+ isb();
+}
+
+static inline void clear_counters(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+ isb();
+}
+
+static void pmu_reset(void)
+{
+ /* Reset all counters */
+ write_pmcr_el0(read_pmcr_el0() |
+ PMCR_EL0_DP_BIT | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+
+ /* Disable all counters */
+ write_pmcntenclr_el0(PMU_CLEAR_ALL);
+
+ /* Clear overflow status */
+ write_pmovsclr_el0(PMU_CLEAR_ALL);
+
+ /* Disable overflow interrupts on all counters */
+ write_pmintenclr_el1(PMU_CLEAR_ALL);
+ isb();
+}
+
+/*
+ * This test runs in Realm EL1, so don't bother enabling counting at lower
+ * ELs or in the Secure world. TF-A has other controls for them and counting
+ * there doesn't impact us.
+ */
+static inline void enable_cycle_counter(void)
+{
+ /*
+ * Set PMCCFILTR_EL0.U != PMCCFILTR_EL0.RLU
+ * to disable counting in Realm EL0.
+ * Set PMCCFILTR_EL0.P = PMCCFILTR_EL0.RLK
+ * to enable counting in Realm EL1.
+ * Set PMCCFILTR_EL0.NSH = PMCCFILTR_EL0.RLH
+ * to disable event counting in Realm EL2.
+ */
+ write_pmccfiltr_el0(PMCCFILTR_EL0_U_BIT |
+ PMCCFILTR_EL0_P_BIT | PMCCFILTR_EL0_RLK_BIT |
+ PMCCFILTR_EL0_NSH_BIT | PMCCFILTR_EL0_RLH_BIT);
+ write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
+ isb();
+}
+
+static inline void enable_event_counter(int ctr_num)
+{
+ /*
+ * Set PMEVTYPER_EL0.U != PMEVTYPER_EL0.RLU
+ * to disable event counting in Realm EL0.
+ * Set PMEVTYPER_EL0.P = PMEVTYPER_EL0.RLK
+ * to enable counting in Realm EL1.
+ * Set PMEVTYPER_EL0.NSH = PMEVTYPER_EL0.RLH
+ * to disable event counting in Realm EL2.
+ */
+ write_pmevtypern_el0(ctr_num,
+ PMEVTYPER_EL0_U_BIT |
+ PMEVTYPER_EL0_P_BIT | PMEVTYPER_EL0_RLK_BIT |
+ PMEVTYPER_EL0_NSH_BIT | PMEVTYPER_EL0_RLH_BIT |
+ (PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
+ write_pmcntenset_el0(read_pmcntenset_el0() |
+ PMCNTENSET_EL0_P_BIT(ctr_num));
+ isb();
+}
+
+/* Doesn't really matter what happens, as long as it happens a lot */
+static inline void execute_nops(void)
+{
+ for (unsigned int i = 0U; i < NOP_REPETITIONS; i++) {
+ __asm__ ("orr x0, x0, x0\n");
+ }
+}
+
+/*
+ * Try the cycle counter with some NOPs to see if it works
+ */
+bool test_pmuv3_cycle_works_realm(void)
+{
+ u_register_t ccounter_start;
+ u_register_t ccounter_end;
+
+ pmu_reset();
+
+ enable_cycle_counter();
+ enable_counting();
+
+ ccounter_start = read_pmccntr_el0();
+ execute_nops();
+ ccounter_end = read_pmccntr_el0();
+ disable_counting();
+ clear_counters();
+
+ realm_printf("counted from %lu to %lu\n",
+ ccounter_start, ccounter_end);
+ if (ccounter_start != ccounter_end) {
+ return true;
+ }
+ return false;
+}
+
+/* Test if max counter available is same as that programmed by host */
+bool test_pmuv3_counter(void)
+{
+ uint64_t num_cnts, num_cnts_host;
+
+ num_cnts_host = realm_shared_data_get_my_host_val(HOST_ARG1_INDEX);
+ num_cnts = GET_PMU_CNT;
+ realm_printf("CPU=%u num_cnts=%lu num_cnts_host=%lu\n", read_mpidr_el1() & MPID_MASK,
+ num_cnts, num_cnts_host);
+ if (num_cnts == num_cnts_host) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Try an event counter with some NOPs to see if it works.
+ */
+bool test_pmuv3_event_works_realm(void)
+{
+ u_register_t evcounter_start;
+ u_register_t evcounter_end;
+
+ if (GET_PMU_CNT == 0) {
+ realm_printf("no event counters implemented\n");
+ return false;
+ }
+
+ pmu_reset();
+
+ enable_event_counter(0);
+ enable_counting();
+
+ /*
+ * Event counter #0 was enabled above; sample it before and after the NOPs.
+ */
+ evcounter_start = read_pmevcntrn_el0(0);
+ execute_nops();
+ disable_counting();
+ evcounter_end = read_pmevcntrn_el0(0);
+ clear_counters();
+
+ realm_printf("counted from %lu to %lu\n",
+ evcounter_start, evcounter_end);
+ if (evcounter_start != evcounter_end) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Check if entering/exiting RMM (with a NOP) preserves all PMU registers.
+ */
+bool test_pmuv3_rmm_preserves(void)
+{
+ u_register_t ctr_start[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_start[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_start[3];
+ u_register_t ctr_end[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_end[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_end[3];
+ unsigned int impl_ev_ctrs = GET_PMU_CNT;
+
+ realm_printf("testing %u event counters\n", impl_ev_ctrs);
+
+ pmu_reset();
+
+ /* Pretend counters have just been used */
+ enable_cycle_counter();
+ enable_event_counter(0);
+ enable_counting();
+ execute_nops();
+ disable_counting();
+
+ /* Capture the PMU state before the RSI call */
+ read_all_counters(ctr_start, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_start, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_start);
+
+ /* Give RMM a chance to scramble everything */
+ (void)rsi_get_version(RSI_ABI_VERSION_VAL);
+
+ /* Read the PMU state back after the RSI call */
+ read_all_counters(ctr_end, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_end, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_end);
+
+ if (memcmp(ctr_start, ctr_end, sizeof(ctr_start)) != 0) {
+ realm_printf("SMC call did not preserve %s\n",
+ "counters");
+ return false;
+ }
+
+ if (memcmp(ctr_cfg_start, ctr_cfg_end, sizeof(ctr_cfg_start)) != 0) {
+ realm_printf("SMC call did not preserve %s\n",
+ "counter config");
+ return false;
+ }
+
+ if (memcmp(pmu_cfg_start, pmu_cfg_end, sizeof(pmu_cfg_start)) != 0) {
+ realm_printf("SMC call did not preserve %s\n",
+ "PMU registers");
+ return false;
+ }
+
+ return true;
+}
+
+bool test_pmuv3_overflow_interrupt(void)
+{
+ unsigned long priority_bits, priority;
+ uint64_t delay_time = DELAY_MS;
+
+ pmu_reset();
+
+ /* Get the number of priority bits implemented */
+ priority_bits = ((read_icv_ctrl_el1() >> ICV_CTLR_EL1_PRIbits_SHIFT) &
+ ICV_CTLR_EL1_PRIbits_MASK) + 1UL;
+
+ /* Unimplemented bits are RES0 and start from LSB */
+ priority = (0xFFUL << (8UL - priority_bits)) & 0xFFUL;
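+ /*
+ * For example, with 5 priority bits implemented this yields 0xF8, the
+ * least restrictive mask value that can be programmed.
+ */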
+
+ /* Set the priority mask register to allow all interrupts */
+ write_icv_pmr_el1(priority);
+
+ /* Enable Virtual Group 1 interrupts */
+ write_icv_igrpen1_el1(ICV_IGRPEN1_EL1_Enable);
+
+ /* Enable IRQ */
+ enable_irq();
+
+ write_pmevcntrn_el0(0, PRE_OVERFLOW);
+ enable_event_counter(0);
+
+ /* Enable interrupt on event counter #0 */
+ write_pmintenset_el1((1UL << 0));
+
+ realm_printf("waiting for PMU vIRQ...\n");
+
+ enable_counting();
+ execute_nops();
+
+ /*
+ * Interrupt handler will clear
+ * Performance Monitors Interrupt Enable Set register
+ * as part of handling the overflow interrupt.
+ */
+ while ((read_pmintenset_el1() != 0UL) && (delay_time != 0ULL)) {
+ --delay_time;
+ }
+
+ /* Disable IRQ */
+ disable_irq();
+
+ pmu_reset();
+
+ if (delay_time == 0ULL) {
+ realm_printf("PMU vIRQ %sreceived in %llums\n", "not ",
+ DELAY_MS);
+ return false;
+ }
+
+ realm_printf("PMU vIRQ %sreceived in %llums\n", "",
+ DELAY_MS - delay_time);
+
+ return true;
+}
diff --git a/realm/realm_psci.c b/realm/realm_psci.c
new file mode 100644
index 000000000..a4a287b3b
--- /dev/null
+++ b/realm/realm_psci.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <debug.h>
+#include <psci.h>
+#include <realm_def.h>
+#include <tftf_lib.h>
+
+typedef void (*secondary_ep_t)(u_register_t);
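+/*
+ * realm_cpu_on() records the requested handler and context id in the tables
+ * below; PSCI always enters a secondary REC through realm_entrypoint, which
+ * then dispatches to realm_secondary_entrypoint() at the end of this file.
+ */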
+static secondary_ep_t entrypoint[MAX_REC_COUNT];
+static u_register_t context_id[MAX_REC_COUNT];
+void realm_entrypoint(void);
+void realm_payload_main(void);
+
+void realm_cpu_off(void)
+{
+ smc_args args = { SMC_PSCI_CPU_OFF };
+
+ tftf_smc(&args);
+}
+
+u_register_t realm_cpu_on(u_register_t mpidr, uintptr_t ep, u_register_t cxt_id)
+{
+ smc_args args;
+ smc_ret_values ret_vals;
+
+
+ if (mpidr >= MAX_REC_COUNT) {
+ return PSCI_E_INVALID_PARAMS;
+ }
+
+ if (entrypoint[mpidr] != NULL) {
+ return PSCI_E_ALREADY_ON;
+ }
+
+ args.fid = SMC_PSCI_CPU_ON;
+ args.arg1 = mpidr;
+ args.arg2 = (u_register_t)realm_entrypoint;
+ args.arg3 = cxt_id;
+ entrypoint[mpidr] = (secondary_ep_t)ep;
+ context_id[mpidr] = cxt_id;
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+u_register_t realm_psci_affinity_info(u_register_t target_affinity,
+ uint32_t lowest_affinity_level)
+{
+ smc_args args;
+ smc_ret_values ret_vals;
+
+ args.fid = SMC_PSCI_AFFINITY_INFO;
+ args.arg1 = target_affinity;
+ args.arg2 = lowest_affinity_level;
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+u_register_t realm_psci_features(uint32_t psci_func_id)
+{
+
+ smc_args args;
+ smc_ret_values ret_vals;
+
+ args.fid = SMC_PSCI_FEATURES;
+ args.arg1 = psci_func_id;
+ ret_vals = tftf_smc(&args);
+ return ret_vals.ret0;
+}
+
+void realm_secondary_entrypoint(u_register_t cxt_id)
+{
+ u_register_t my_mpidr, id;
+ secondary_ep_t ep;
+
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ ep = entrypoint[my_mpidr];
+ id = context_id[my_mpidr];
+ if (ep != NULL) {
+ entrypoint[my_mpidr] = NULL;
+ context_id[my_mpidr] = 0;
+ (ep)(id);
+ } else {
+ /*
+ * The Host can run a REC directly, without a CPU_ON from the
+ * Realm, if the REC was created RUNNABLE. Jump to the main
+ * loop in this case.
+ */
+ while (true) {
+ realm_payload_main();
+ }
+ }
+ realm_cpu_off();
+}
diff --git a/realm/realm_rsi.c b/realm/realm_rsi.c
new file mode 100644
index 000000000..04f57fc2d
--- /dev/null
+++ b/realm/realm_rsi.c
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <host_realm_rmi.h>
+#include <lib/aarch64/arch_features.h>
+#include <realm_rsi.h>
+#include <smccc.h>
+
+/* This function requests the RSI ABI version from the RMM */
+u_register_t rsi_get_version(u_register_t req_ver)
+{
+ smc_ret_values res = {};
+
+ res = tftf_smc(&(smc_args)
+ {RSI_VERSION, req_ver, 0UL, 0UL, 0UL, 0UL, 0UL, 0UL});
+
+ if (res.ret0 == SMC_UNKNOWN) {
+ return SMC_UNKNOWN;
+ }
+ /* Return lower version. */
+ return res.ret1;
+}
+
+/* This function will call the Host to request IPA of the NS shared buffer */
+u_register_t rsi_get_ns_buffer(void)
+{
+ smc_ret_values res = {};
+ struct rsi_host_call host_cal __aligned(sizeof(struct rsi_host_call));
+
+ host_cal.imm = HOST_CALL_GET_SHARED_BUFF_CMD;
+ res = tftf_smc(&(smc_args) {RSI_HOST_CALL, (u_register_t)&host_cal,
+ 0UL, 0UL, 0UL, 0UL, 0UL, 0UL});
+ if (res.ret0 != RSI_SUCCESS) {
+ return 0U;
+ }
+ return host_cal.gprs[0];
+}
+
+/* This function calls the Host and requests to exit the Realm with the given exit code */
+void rsi_exit_to_host(enum host_call_cmd exit_code)
+{
+ struct rsi_host_call host_cal __aligned(sizeof(struct rsi_host_call));
+
+ host_cal.imm = exit_code;
+ host_cal.gprs[0] = read_mpidr_el1() & MPID_MASK;
+ tftf_smc(&(smc_args) {RSI_HOST_CALL, (u_register_t)&host_cal,
+ 0UL, 0UL, 0UL, 0UL, 0UL, 0UL});
+}
+
+/* This function will exit to the Host to request RIPAS CHANGE of IPA range */
+u_register_t rsi_ipa_state_set(u_register_t base,
+ u_register_t top,
+ rsi_ripas_type ripas,
+ u_register_t flag,
+ u_register_t *new_base,
+ rsi_ripas_respose_type *response)
+{
+ smc_ret_values res = {};
+
+ res = tftf_smc(&(smc_args)
+ {RSI_IPA_STATE_SET, base, top, ripas, flag});
+ if (res.ret0 == RSI_SUCCESS) {
+ *new_base = res.ret1;
+ *response = res.ret2;
+ }
+ return res.ret0;
+}
+
+/* This function returns the RIPAS of an IPA */
+u_register_t rsi_ipa_state_get(u_register_t adr, rsi_ripas_type *ripas)
+{
+ smc_ret_values res = {};
+
+ res = tftf_smc(&(smc_args)
+ {RSI_IPA_STATE_GET, adr});
+ if (res.ret0 == RSI_SUCCESS) {
+ *ripas = res.ret1;
+ }
+ return res.ret0;
+}
diff --git a/realm/realm_shared_data.c b/realm/realm_shared_data.c
new file mode 100644
index 000000000..2d09f7873
--- /dev/null
+++ b/realm/realm_shared_data.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <host_shared_data.h>
+
+/*
+ * Pointer to the Realm-mapped shared buffer array (one entry per REC),
+ * set by realm_set_shared_structure().
+ */
+
+static host_shared_data_t *guest_shared_data;
+
+/*
+ * Set guest mapped shared buffer pointer
+ */
+void realm_set_shared_structure(host_shared_data_t *ptr)
+{
+ guest_shared_data = ptr;
+}
+
+/*
+ * Get guest mapped shared buffer pointer
+ */
+host_shared_data_t *realm_get_my_shared_structure(void)
+{
+ return &guest_shared_data[read_mpidr_el1() & MPID_MASK];
+}
+
+/*
+ * Return Host's data at index
+ */
+u_register_t realm_shared_data_get_my_host_val(uint8_t index)
+{
+ assert(index < MAX_DATA_SIZE);
+ return guest_shared_data[read_mpidr_el1() & MPID_MASK].host_param_val[index];
+}
+
+/*
+ * Get command sent from Host to this rec
+ */
+uint8_t realm_shared_data_get_my_realm_cmd(void)
+{
+ return guest_shared_data[read_mpidr_el1() & MPID_MASK].realm_cmd;
+}
+
+/*
+ * Set data to be shared from this rec to Host
+ */
+void realm_shared_data_set_my_realm_val(uint8_t index, u_register_t val)
+{
+ assert(index < MAX_DATA_SIZE);
+ guest_shared_data[read_mpidr_el1() & MPID_MASK].realm_out_val[index] = val;
+}
+
diff --git a/realm/realm_simd.c b/realm/realm_simd.c
new file mode 100644
index 000000000..2eb3eab31
--- /dev/null
+++ b/realm/realm_simd.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <stdlib.h>
+#include <sync.h>
+#include <lib/extensions/fpu.h>
+#include <lib/extensions/sve.h>
+
+#include <host_realm_simd.h>
+#include <host_shared_data.h>
+
+#define RL_SVE_OP_ARRAYSIZE 512U
+#define SVE_TEST_ITERATIONS 4U
+
+static int rl_sve_op_1[RL_SVE_OP_ARRAYSIZE];
+static int rl_sve_op_2[RL_SVE_OP_ARRAYSIZE];
+
+static sve_z_regs_t rl_sve_z_regs_write;
+static sve_z_regs_t rl_sve_z_regs_read;
+
+static sve_p_regs_t rl_sve_p_regs_write;
+static sve_p_regs_t rl_sve_p_regs_read;
+
+static sve_ffr_regs_t rl_sve_ffr_regs_write;
+static sve_ffr_regs_t rl_sve_ffr_regs_read;
+
+static fpu_cs_regs_t rl_fpu_cs_regs_write;
+static fpu_cs_regs_t rl_fpu_cs_regs_read;
+
+static int volatile realm_got_undef_abort;
+
+/* Returns the maximum supported VL. This test is called only by an SVE Realm */
+bool test_realm_sve_rdvl(void)
+{
+ host_shared_data_t *sd = realm_get_my_shared_structure();
+ struct sve_cmd_rdvl *output;
+
+ assert(is_armv8_2_sve_present());
+
+ output = (struct sve_cmd_rdvl *)sd->realm_cmd_output_buffer;
+ memset((void *)output, 0, sizeof(struct sve_cmd_rdvl));
+
+ sve_config_vq(SVE_VQ_ARCH_MAX);
+ output->rdvl = sve_rdvl_1();
+
+ return true;
+}
+
+/*
+ * Reads and returns the ID_AA64PFR0_EL1 and ID_AA64ZFR0_EL1 registers.
+ * This test can be called from an SVE or a non-SVE Realm.
+ */
+bool test_realm_sve_read_id_registers(void)
+{
+ host_shared_data_t *sd = realm_get_my_shared_structure();
+ struct sve_cmd_id_regs *output;
+
+ output = (struct sve_cmd_id_regs *)sd->realm_cmd_output_buffer;
+ memset((void *)output, 0, sizeof(struct sve_cmd_id_regs));
+
+ realm_printf("reading ID registers: ID_AA64PFR0_EL1, "
+ " ID_AA64ZFR0_EL1\n");
+ output->id_aa64pfr0_el1 = read_id_aa64pfr0_el1();
+ output->id_aa64zfr0_el1 = read_id_aa64zfr0_el1();
+
+ return true;
+}
+
+/*
+ * Probes all VLs and returns a bitmap with a bit set for each corresponding
+ * valid VQ. This test is called only by an SVE Realm.
+ */
+bool test_realm_sve_probe_vl(void)
+{
+ host_shared_data_t *sd = realm_get_my_shared_structure();
+ struct sve_cmd_probe_vl *output;
+
+ assert(is_armv8_2_sve_present());
+
+ output = (struct sve_cmd_probe_vl *)&sd->realm_cmd_output_buffer;
+ memset((void *)output, 0, sizeof(struct sve_cmd_probe_vl));
+
+ /* Probe all VLs */
+ output->vl_bitmap = sve_probe_vl(SVE_VQ_ARCH_MAX);
+
+ return true;
+}
+
+bool test_realm_sve_ops(void)
+{
+ int val, i;
+
+ assert(is_armv8_2_sve_present());
+
+ /* get a random value to do sve_subtract */
+ val = rand();
+ for (i = 0; i < RL_SVE_OP_ARRAYSIZE; i++) {
+ rl_sve_op_1[i] = val - i;
+ rl_sve_op_2[i] = 1;
+ }
+
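+ /*
+ * Each pass below subtracts the all-ones array once, so after
+ * SVE_TEST_ITERATIONS passes every element should have dropped by
+ * exactly SVE_TEST_ITERATIONS; the final check relies on this.
+ */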
+ for (i = 0; i < SVE_TEST_ITERATIONS; i++) {
+ /* Config Realm with random SVE length */
+ sve_config_vq(SVE_GET_RANDOM_VQ);
+
+ /* Perform SVE operations, without world switch */
+ sve_subtract_arrays(rl_sve_op_1, rl_sve_op_1, rl_sve_op_2,
+ RL_SVE_OP_ARRAYSIZE);
+ }
+
+ /* Check result of SVE operations. */
+ for (i = 0; i < RL_SVE_OP_ARRAYSIZE; i++) {
+ if (rl_sve_op_1[i] != (val - i - SVE_TEST_ITERATIONS)) {
+ realm_printf("SVE ops failed\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Fill SVE Z registers with known pattern */
+bool test_realm_sve_fill_regs(void)
+{
+ assert(is_armv8_2_sve_present());
+
+ /* Config Realm with max SVE length */
+ sve_config_vq(SVE_VQ_ARCH_MAX);
+
+ sve_z_regs_write_rand(&rl_sve_z_regs_write);
+ sve_p_regs_write_rand(&rl_sve_p_regs_write);
+ sve_ffr_regs_write_rand(&rl_sve_ffr_regs_write);
+
+ /* fpcr, fpsr common registers */
+ fpu_cs_regs_write_rand(&rl_fpu_cs_regs_write);
+
+ return true;
+}
+
+/* Compare SVE Z registers with last filled in values */
+bool test_realm_sve_cmp_regs(void)
+{
+ bool rc = true;
+ uint64_t bit_map;
+
+ assert(is_armv8_2_sve_present());
+
+ memset(&rl_sve_z_regs_read, 0, sizeof(rl_sve_z_regs_read));
+ memset(&rl_sve_p_regs_read, 0, sizeof(rl_sve_p_regs_read));
+ memset(&rl_sve_ffr_regs_read, 0, sizeof(rl_sve_ffr_regs_read));
+
+ /* Read all SVE registers */
+ sve_z_regs_read(&rl_sve_z_regs_read);
+ sve_p_regs_read(&rl_sve_p_regs_read);
+ sve_ffr_regs_read(&rl_sve_ffr_regs_read);
+
+ /* Compare the read values with last written values */
+ bit_map = sve_z_regs_compare(&rl_sve_z_regs_write, &rl_sve_z_regs_read);
+ if (bit_map) {
+ rc = false;
+ }
+
+ bit_map = sve_p_regs_compare(&rl_sve_p_regs_write, &rl_sve_p_regs_read);
+ if (bit_map) {
+ rc = false;
+ }
+
+ bit_map = sve_ffr_regs_compare(&rl_sve_ffr_regs_write,
+ &rl_sve_ffr_regs_read);
+ if (bit_map) {
+ rc = false;
+ }
+
+ /* fpcr, fpsr common registers */
+ fpu_cs_regs_read(&rl_fpu_cs_regs_read);
+ if (fpu_cs_regs_compare(&rl_fpu_cs_regs_write, &rl_fpu_cs_regs_read)) {
+ ERROR("FPCR/FPSR mismatch\n");
+ rc = false;
+ }
+
+ return rc;
+}
+
+static bool realm_sync_exception_handler(void)
+{
+ uint64_t esr_el1 = read_esr_el1();
+
+ if (EC_BITS(esr_el1) == EC_UNKNOWN) {
+ realm_printf("received undefined abort. "
+ "esr_el1: 0x%llx elr_el1: 0x%llx\n",
+ esr_el1, read_elr_el1());
+ realm_got_undef_abort++;
+ }
+
+ return true;
+}
+
+/* Check if Realm gets undefined abort when it accesses SVE functionality */
+bool test_realm_sve_undef_abort(void)
+{
+ realm_got_undef_abort = 0UL;
+
+ /* install exception handler to catch undef abort */
+ register_custom_sync_exception_handler(&realm_sync_exception_handler);
+ (void)sve_rdvl_1();
+ unregister_custom_sync_exception_handler();
+
+ if (realm_got_undef_abort == 0UL) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Reads and returns the ID_AA64PFR1_EL1 and ID_AA64SMFR0_EL1 registers */
+bool test_realm_sme_read_id_registers(void)
+{
+ host_shared_data_t *sd = realm_get_my_shared_structure();
+ struct sme_cmd_id_regs *output;
+
+ output = (struct sme_cmd_id_regs *)sd->realm_cmd_output_buffer;
+ memset((void *)output, 0, sizeof(struct sme_cmd_id_regs));
+
+ realm_printf("reading ID registers: ID_AA64PFR1_EL1, "
+ " ID_AA64SMFR0_EL1\n");
+
+ output->id_aa64pfr1_el1 = read_id_aa64pfr1_el1();
+ output->id_aa64smfr0_el1 = read_id_aa64smfr0_el1();
+
+ return true;
+}
+
+/* Check if Realm gets undefined abort when it accesses SME functionality */
+bool test_realm_sme_undef_abort(void)
+{
+ realm_got_undef_abort = 0UL;
+
+ /* install exception handler to catch undef abort */
+ register_custom_sync_exception_handler(&realm_sync_exception_handler);
+ (void)read_svcr();
+ unregister_custom_sync_exception_handler();
+
+ if (realm_got_undef_abort == 0UL) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/smc_fuzz/dts/sdei.dts b/smc_fuzz/dts/sdei.dts
index 2418916ad..a8199e1d5 100644
--- a/smc_fuzz/dts/sdei.dts
+++ b/smc_fuzz/dts/sdei.dts
@@ -15,31 +15,31 @@
bias = <30>;
sdei_version {
bias = <30>;
- functionname = "sdei_version";
+ functionname = "sdei_version_funcid";
};
sdei_pe_unmask {
bias = <30>;
- functionname = "sdei_pe_unmask";
+ functionname = "sdei_pe_unmask_funcid";
};
sdei_pe_mask {
bias = <30>;
- functionname = "sdei_pe_mask";
+ functionname = "sdei_pe_mask_funcid";
};
sdei_event_status {
bias = <30>;
- functionname = "sdei_event_status";
+ functionname = "sdei_event_status_funcid";
};
sdei_event_signal {
bias = <30>;
- functionname = "sdei_event_signal";
+ functionname = "sdei_event_signal_funcid";
};
sdei_private_reset {
bias = <30>;
- functionname = "sdei_private_reset";
+ functionname = "sdei_private_reset_funcid";
};
sdei_shared_reset {
bias = <30>;
- functionname = "sdei_shared_reset";
+ functionname = "sdei_shared_reset_funcid";
};
};
diff --git a/smc_fuzz/dts/top.dts b/smc_fuzz/dts/top.dts
new file mode 100644
index 000000000..4d37c7e0c
--- /dev/null
+++ b/smc_fuzz/dts/top.dts
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/*
+ * Top level device tree file to bias the SMC calls.
+ * The biases are arbitrary and can be any value.
+ * They are only significant when weighted against the
+ * other biases. 30 was chosen arbitrarily.
+ */
+
+/dts-v1/;
+
+/ {
+
+ sdei {
+ bias = <30>;
+ sdei_version {
+ bias = <30>;
+ functionname = "sdei_version_funcid";
+ };
+ sdei_pe_unmask {
+ bias = <30>;
+ functionname = "sdei_pe_unmask_funcid";
+ };
+ sdei_pe_mask {
+ bias = <30>;
+ functionname = "sdei_pe_mask_funcid";
+ };
+ sdei_event_status {
+ bias = <30>;
+ functionname = "sdei_event_status_funcid";
+ };
+ sdei_event_signal {
+ bias = <30>;
+ functionname = "sdei_event_signal_funcid";
+ };
+ sdei_private_reset {
+ bias = <30>;
+ functionname = "sdei_private_reset_funcid";
+ };
+ sdei_shared_reset {
+ bias = <30>;
+ functionname = "sdei_shared_reset_funcid";
+ };
+ };
+ tsp {
+ bias = <30>;
+ tsp_add_op {
+ bias = <30>;
+ functionname = "tsp_add_op_funcid";
+ };
+ tsp_sub_op {
+ bias = <30>;
+ functionname = "tsp_sub_op_funcid";
+ };
+ tsp_mul_op {
+ bias = <30>;
+ functionname = "tsp_mul_op_funcid";
+ };
+ tsp_div_op {
+ bias = <30>;
+ functionname = "tsp_div_op_funcid";
+ };
+ };
+};
diff --git a/smc_fuzz/include/fifo3d.h b/smc_fuzz/include/fifo3d.h
index c04567ce1..95ebb4aca 100644
--- a/smc_fuzz/include/fifo3d.h
+++ b/smc_fuzz/include/fifo3d.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,22 +18,28 @@ struct fifo3d {
char ***nnfifo;
char ***fnamefifo;
int **biasfifo;
+ int **fidfifo;
int col;
int curr_col;
int *row;
};
/*
- * Push function name string into raw data structure
+ * Push function name string into the raw data structure
*/
void push_3dfifo_fname(struct fifo3d *f3d, char *fname);
/*
- * Push bias value into raw data structure
+ * Push bias value into the raw data structure
*/
void push_3dfifo_bias(struct fifo3d *f3d, int bias);
/*
+ * Push id for function value into the raw data structure
+ */
+void push_3dfifo_fid(struct fifo3d *f3d, int id);
+
+/*
* Create new column and/or row for raw data structure for newly
* found node from device tree
*/
diff --git a/smc_fuzz/include/fuzz_helper.h b/smc_fuzz/include/fuzz_helper.h
new file mode 100644
index 000000000..defa0a582
--- /dev/null
+++ b/smc_fuzz/include/fuzz_helper.h
@@ -0,0 +1,7 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#define CMP_SUCCESS 0
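CMP_SUCCESS documents that a zero return from strcmp() means the strings matched. A trivial illustration (hypothetical helper, not part of the sources):

#include <string.h>
#include <fuzz_helper.h>

static int is_functionname_prop(const char *prop)
{
	return strcmp(prop, "functionname") == CMP_SUCCESS;
}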
diff --git a/smc_fuzz/include/nfifo.h b/smc_fuzz/include/nfifo.h
new file mode 100644
index 000000000..cef07da97
--- /dev/null
+++ b/smc_fuzz/include/nfifo.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#ifndef NFIFO_H
+#define NFIFO_H
+
+#define CMP_SUCCESS 0
+#define NFIFO_Q_THRESHOLD 10
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "smcmalloc.h"
+
+struct nfifo {
+ char **lnme;
+ int nent;
+ int thent;
+};
+
+void nfifoinit(struct nfifo *nf, struct memmod *mmod);
+void pushnme(char *nme, struct nfifo *nf, struct memmod *mmod);
+char *readnme(int ent, struct nfifo *nf, struct memmod *mmod);
+int searchnme(char *nme, struct nfifo *nf, struct memmod *mmod);
+void printent(struct nfifo *nf);
+
+#endif /* NFIFO_H */
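Usage sketch for the name FIFO declared above (illustrative only). Strings pushed with pushnme() receive a stable numeric id; as implemented in nfifo.c, searchnme() returns that id (1-based) or -1 when the string has not been seen.

static void nfifo_example(struct memmod *mmod)
{
	struct nfifo nf;
	int id;

	nfifoinit(&nf, mmod);
	pushnme("sdei_version_funcid", &nf, mmod);
	pushnme("sdei_pe_mask_funcid", &nf, mmod);

	id = searchnme("sdei_pe_mask_funcid", &nf, mmod);	/* -> 2 */
	printent(&nf);			/* dump all name/id pairs */
	(void)id;
}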
diff --git a/smc_fuzz/include/sdei_fuzz_helper.h b/smc_fuzz/include/sdei_fuzz_helper.h
new file mode 100644
index 000000000..cf4ddd10c
--- /dev/null
+++ b/smc_fuzz/include/sdei_fuzz_helper.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fuzz_helper.h>
+#include <power_management.h>
+#include <sdei.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#ifndef sdei_version_funcid
+#define sdei_version_funcid 0
+#endif
+#ifndef sdei_pe_unmask_funcid
+#define sdei_pe_unmask_funcid 0
+#endif
+#ifndef sdei_pe_mask_funcid
+#define sdei_pe_mask_funcid 0
+#endif
+#ifndef sdei_event_status_funcid
+#define sdei_event_status_funcid 0
+#endif
+#ifndef sdei_event_signal_funcid
+#define sdei_event_signal_funcid 0
+#endif
+#ifndef sdei_private_reset_funcid
+#define sdei_private_reset_funcid 0
+#endif
+#ifndef sdei_shared_reset_funcid
+#define sdei_shared_reset_funcid 0
+#endif
+
+
+void tftf_test_sdei_noarg(int64_t (*sdei_func)(void), char *funcstr);
+void tftf_test_sdei_singlearg(int64_t (*sdei_func)(uint64_t), char *funcstr);
+void run_sdei_fuzz(int funcid);
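The *_funcid macros default to 0 here so the header still compiles when no generated name list is present; the real values come from the fuzz_names.h included by sdei_fuzz_helper.c (presumably produced by the fuzzer build from the DTS function names). A hedged example of driving the helpers directly, using calls from the TFTF SDEI library:

static void example_sdei_probe(void)
{
	/* Wrap a no-argument SDEI call and log any failure. */
	tftf_test_sdei_noarg(sdei_pe_mask, "sdei_pe_mask");

	/* Same for a call taking a single argument (event number 0). */
	tftf_test_sdei_singlearg((int64_t (*)(uint64_t))sdei_event_status,
				 "sdei_event_status");
}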
diff --git a/smc_fuzz/include/smcmalloc.h b/smc_fuzz/include/smcmalloc.h
index 129e07c6e..fe134bfe1 100644
--- a/smc_fuzz/include/smcmalloc.h
+++ b/smc_fuzz/include/smcmalloc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,7 +11,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include "fifo3d.h"
#define TOTALMEMORYSIZE (0x10000)
#define BLKSPACEDIV (4)
diff --git a/smc_fuzz/include/tsp_fuzz_helper.h b/smc_fuzz/include/tsp_fuzz_helper.h
new file mode 100644
index 000000000..019ee6804
--- /dev/null
+++ b/smc_fuzz/include/tsp_fuzz_helper.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fuzz_helper.h>
+#include <power_management.h>
+#include <sdei.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#ifndef tsp_add_op_funcid
+#define tsp_add_op_funcid 0
+#endif
+#ifndef tsp_sub_op_funcid
+#define tsp_sub_op_funcid 0
+#endif
+#ifndef tsp_mul_op_funcid
+#define tsp_mul_op_funcid 0
+#endif
+#ifndef tsp_div_op_funcid
+#define tsp_div_op_funcid 0
+#endif
+
+void tftf_test_tsp_smc(uint64_t tsp_id, char *funcstr);
+void run_tsp_fuzz(int funcid);
diff --git a/smc_fuzz/src/fifo3d.c b/smc_fuzz/src/fifo3d.c
index 119b26c7e..0b99907d4 100644
--- a/smc_fuzz/src/fifo3d.c
+++ b/smc_fuzz/src/fifo3d.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -27,7 +27,7 @@
#endif
/*
- * Push function name string into raw data structure
+ * Push function name string into the data structure from the device tree file
*/
void push_3dfifo_fname(struct fifo3d *f3d, char *fname)
{
@@ -36,7 +36,7 @@ void push_3dfifo_fname(struct fifo3d *f3d, char *fname)
}
/*
- * Push bias value into raw data structure
+ * Push bias value into the data structure from the device tree file
*/
void push_3dfifo_bias(struct fifo3d *f3d, int bias)
{
@@ -44,14 +44,36 @@ void push_3dfifo_bias(struct fifo3d *f3d, int bias)
}
/*
+ * Push function id value into the data structure from the device tree file
+ */
+void push_3dfifo_fid(struct fifo3d *f3d, int id)
+{
+ f3d->fidfifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1] = id;
+}
+
+
+/*
* Create new column and/or row for raw data structure for newly
- * found node from device tree
+ * found node from device tree. The FIFO holds four elements reflecting the
+ * values read from the device tree for each node. This preserves the
+ * hierarchy found in that file so it can be used to construct the SMC node
+ * structure that is ultimately used to call the SMC functions at random.
+ * The result is essentially the bias tree in its final form.
*/
void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
{
+
+/*
+ * four elements required:
+ * 1. node name as a string
+ * 2. function name as a string
+ * 3. bias value as an integer
+ * 4. id value as an integer
+ */
char ***tnnfifo;
char ***tfnamefifo;
int **tbiasfifo;
+ int **tfidfifo;
if (f3d->col == f3d->curr_col) {
f3d->col++;
@@ -76,15 +98,17 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
f3d->row[f3d->col - 1] = 1;
/*
- * Create new raw data memory
+ * Start node creation for reading of device tree file
*/
tnnfifo = GENMALLOC(f3d->col * sizeof(char **));
tfnamefifo = GENMALLOC(f3d->col * sizeof(char **));
tbiasfifo = GENMALLOC((f3d->col) * sizeof(int *));
+ tfidfifo = GENMALLOC((f3d->col) * sizeof(int *));
for (unsigned int i = 0U; (int)i < f3d->col; i++) {
tnnfifo[i] = GENMALLOC(f3d->row[i] * sizeof(char *));
tfnamefifo[i] = GENMALLOC(f3d->row[i] * sizeof(char *));
tbiasfifo[i] = GENMALLOC((f3d->row[i]) * sizeof(int));
+ tfidfifo[i] = GENMALLOC((f3d->row[i]) * sizeof(int));
for (unsigned int j = 0U; (int)j < f3d->row[i]; j++) {
tnnfifo[i][j] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
tfnamefifo[i][j] =
@@ -95,6 +119,7 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
strlcpy(tfnamefifo[i][j],
f3d->fnamefifo[i][j], MAX_NAME_CHARS);
tbiasfifo[i][j] = f3d->biasfifo[i][j];
+ tfidfifo[i][j] = f3d->fidfifo[i][j];
}
}
}
@@ -107,6 +132,7 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
strlcpy(tfnamefifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1],
"none", MAX_NAME_CHARS);
tbiasfifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1] = 0;
+ tfidfifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1] = 0;
/*
 * Free the old raw data structures
@@ -119,11 +145,13 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
GENFREE(f3d->nnfifo[i]);
GENFREE(f3d->fnamefifo[i]);
GENFREE(f3d->biasfifo[i]);
+ GENFREE(f3d->fidfifo[i]);
}
if (f3d->col > 1) {
GENFREE(f3d->nnfifo);
GENFREE(f3d->fnamefifo);
GENFREE(f3d->biasfifo);
+ GENFREE(f3d->fidfifo);
}
/*
@@ -132,6 +160,7 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
f3d->nnfifo = tnnfifo;
f3d->fnamefifo = tfnamefifo;
f3d->biasfifo = tbiasfifo;
+ f3d->fidfifo = tfidfifo;
}
if (f3d->col != f3d->curr_col) {
/*
@@ -141,15 +170,17 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
f3d->row[f3d->col - 1]++;
/*
- * Create new raw data memory
+ * Create a new node from the device tree file
*/
tnnfifo = GENMALLOC(f3d->col * sizeof(char **));
tfnamefifo = GENMALLOC(f3d->col * sizeof(char **));
tbiasfifo = GENMALLOC((f3d->col) * sizeof(int *));
+ tfidfifo = GENMALLOC((f3d->col) * sizeof(int *));
for (unsigned int i = 0U; (int)i < f3d->col; i++) {
tnnfifo[i] = GENMALLOC(f3d->row[i] * sizeof(char *));
tfnamefifo[i] = GENMALLOC(f3d->row[i] * sizeof(char *));
tbiasfifo[i] = GENMALLOC((f3d->row[i]) * sizeof(int));
+ tfidfifo[i] = GENMALLOC((f3d->row[i]) * sizeof(int));
for (unsigned int j = 0U; (int)j < f3d->row[i]; j++) {
tnnfifo[i][j] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
tfnamefifo[i][j] =
@@ -160,6 +191,7 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
strlcpy(tfnamefifo[i][j],
f3d->fnamefifo[i][j], MAX_NAME_CHARS);
tbiasfifo[i][j] = f3d->biasfifo[i][j];
+ tfidfifo[i][j] = f3d->fidfifo[i][j];
}
}
}
@@ -172,6 +204,7 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
strlcpy(tfnamefifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1],
"none", MAX_NAME_CHARS);
tbiasfifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1] = 0;
+ tfidfifo[f3d->col - 1][f3d->row[f3d->col - 1] - 1] = 0;
/*
 * Free the old raw data structures
@@ -187,10 +220,12 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
GENFREE(f3d->nnfifo[i]);
GENFREE(f3d->fnamefifo[i]);
GENFREE(f3d->biasfifo[i]);
+ GENFREE(f3d->fidfifo[i]);
}
GENFREE(f3d->nnfifo);
GENFREE(f3d->fnamefifo);
GENFREE(f3d->biasfifo);
+ GENFREE(f3d->fidfifo);
/*
* Point to new data
@@ -198,5 +233,6 @@ void push_3dfifo_col(struct fifo3d *f3d, char *entry, struct memmod *mmod)
f3d->nnfifo = tnnfifo;
f3d->fnamefifo = tfnamefifo;
f3d->biasfifo = tbiasfifo;
+ f3d->fidfifo = tfidfifo;
}
}
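For reference, one leaf node from the DTS files above contributes one row to each of the four parallel FIFOs. The sketch below is illustrative only (the real sequencing lives in the device tree parser in randsmcmod.c) and shows the order of the push calls for a single node:

#include "fifo3d.h"
#include "nfifo.h"

static void fifo3d_row_example(struct fifo3d *f3d, struct nfifo *nf,
			       struct memmod *mmod)
{
	/* New node: allocate a fresh column/row and store the node name. */
	push_3dfifo_col(f3d, "sdei_version", mmod);

	/* The node's properties fill in the remaining three elements. */
	push_3dfifo_fname(f3d, "sdei_version_funcid");
	push_3dfifo_bias(f3d, 30);
	pushnme("sdei_version_funcid", nf, mmod);
	push_3dfifo_fid(f3d, searchnme("sdei_version_funcid", nf, mmod));
}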
diff --git a/smc_fuzz/src/nfifo.c b/smc_fuzz/src/nfifo.c
new file mode 100644
index 000000000..b4a021c99
--- /dev/null
+++ b/smc_fuzz/src/nfifo.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * FIFO for matching strings to integers
+ */
+
+#include "nfifo.h"
+
+#ifdef SMC_FUZZ_TMALLOC
+#define GENMALLOC(x) malloc((x))
+#define GENFREE(x) free((x))
+#else
+#define GENMALLOC(x) smcmalloc((x), mmod)
+#define GENFREE(x) smcfree((x), mmod)
+#endif
+
+/*
+ * Initialization of FIFO
+ */
+void nfifoinit(struct nfifo *nf, struct memmod *mmod)
+{
+ nf->nent = 0;
+ nf->thent = NFIFO_Q_THRESHOLD;
+ nf->lnme = GENMALLOC(nf->thent * sizeof(char *));
+}
+
+/*
+ * Push string to FIFO for automatic numerical assignment
+ */
+void pushnme(char *nme, struct nfifo *nf, struct memmod *mmod)
+{
+ char **tnme;
+
+ if (searchnme(nme, nf, mmod) == -1) {
+ if (nf->nent >= nf->thent) {
+ nf->thent += NFIFO_Q_THRESHOLD;
+ tnme = GENMALLOC(nf->thent * sizeof(char *));
+ for (unsigned int x = 0; x < nf->nent; x++) {
+ tnme[x] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
+ strlcpy(tnme[x], nf->lnme[x], MAX_NAME_CHARS);
+ }
+ tnme[nf->nent] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
+ strlcpy(tnme[nf->nent], nme, MAX_NAME_CHARS);
+ for (unsigned int x = 0; x < nf->nent; x++) {
+ GENFREE(nf->lnme[x]);
+ }
+ GENFREE(nf->lnme);
+ nf->lnme = tnme;
+ } else {
+ nf->lnme[nf->nent] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
+ strlcpy(nf->lnme[nf->nent], nme, MAX_NAME_CHARS);
+ }
+ nf->nent++;
+ }
+}
+
+/*
+ * Find the name associated with a numerical designation
+ */
+char *readnme(int ent, struct nfifo *nf, struct memmod *mmod)
+{
+ return nf->lnme[ent];
+}
+
+/*
+ * Search the FIFO for the integer associated with an input string;
+ * returns -1 if not found
+ */
+int searchnme(char *nme, struct nfifo *nf, struct memmod *mmod)
+{
+ for (unsigned int x = 0; x < nf->nent; x++) {
+ if (strcmp(nf->lnme[x], nme) == CMP_SUCCESS) {
+ return (x + 1);
+ }
+ }
+ return -1;
+}
+
+/*
+ * Print all elements of the FIFO: each string and its associated integer
+ */
+void printent(struct nfifo *nf)
+{
+ for (unsigned int x = 0; x < nf->nent; x++) {
+ printf("nfifo entry %s has value %d\n", nf->lnme[x], x);
+ }
+}
diff --git a/smc_fuzz/src/randsmcmod.c b/smc_fuzz/src/randsmcmod.c
index 2b8d01729..a86feb61e 100644
--- a/smc_fuzz/src/randsmcmod.c
+++ b/smc_fuzz/src/randsmcmod.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,19 +9,20 @@
#include <drivers/arm/private_timer.h>
#include <events.h>
#include "fifo3d.h"
+#include "nfifo.h"
#include <libfdt.h>
+#include <plat_topology.h>
#include <power_management.h>
-#include <sdei.h>
#include <tftf_lib.h>
-#include <timer.h>
-
-#include <plat_topology.h>
-#include <platform.h>
extern char _binary___dtb_start[];
+extern void runtestfunction(int funcid);
struct memmod tmod __aligned(65536) __section("smcfuzz");
+static int cntndarray;
+static struct rand_smc_node *ndarray;
+static struct memmod *mmod;
/*
* switch to use either standard C malloc or custom SMC malloc
@@ -119,13 +120,14 @@ struct rand_smc_node {
int *biases; // Biases of the individual nodes
int *biasarray; // Array of biases across all nodes
char **snames; // String that is unique to the SMC call called in test
+ int *snameid; // ID that is unique to the SMC call called in test
struct rand_smc_node *treenodes; // Selection of nodes that are farther down in the tree
- // that reference further rand_smc_node objects
+ // that reference further rand_smc_node objects
int *norcall; // Specifies whether a particular node is a leaf node or tree node
- int entries; // Number of nodes in object
- int biasent; // Number that gives the total number of entries in biasarray
- // based on all biases of the nodes
- char **nname; // Array of node names
+ int entries; // Number of nodes in object
+ int biasent; // Number that gives the total number of entries in biasarray
+ // based on all biases of the nodes
+ char **nname; // Array of node names
};
@@ -156,6 +158,9 @@ struct rand_smc_node *createsmctree(int *casz,
int cntndarray;
struct rand_smc_node nrnode;
struct rand_smc_node *tndarray;
+ struct nfifo nf;
+
+ nfifoinit(&nf, mmod);
f3d.col = 0;
f3d.curr_col = 0;
@@ -166,9 +171,6 @@ struct rand_smc_node *createsmctree(int *casz,
fhdptr = (struct fdt_header *)_binary___dtb_start;
- if (fdt_check_header((void *)fhdptr) != 0) {
- printf("ERROR, not device tree compliant\n");
- }
fhd = *fhdptr;
cntndarray = 0;
nrnode.entries = 0;
@@ -244,6 +246,8 @@ struct rand_smc_node *createsmctree(int *casz,
if (strcmp(cset, "functionname") == 0) {
pullstringdt(&dtb, dtb_beg, 0, cset);
push_3dfifo_fname(&f3d, cset);
+ pushnme(cset, &nf, mmod);
+ push_3dfifo_fid(&f3d, searchnme(cset, &nf, mmod));
leafnode = 1;
if (bias_count == 0U) {
bintnode = 1U;
@@ -279,6 +283,7 @@ struct rand_smc_node *createsmctree(int *casz,
for (unsigned int j = 0U; (int)j < cntndarray; j++) {
tndarray[j].biases = GENMALLOC(ndarray[j].entries * sizeof(int));
tndarray[j].snames = GENMALLOC(ndarray[j].entries * sizeof(char *));
+ tndarray[j].snameid = GENMALLOC(ndarray[j].entries * sizeof(int));
tndarray[j].norcall = GENMALLOC(ndarray[j].entries * sizeof(int));
tndarray[j].nname = GENMALLOC(ndarray[j].entries * sizeof(char *));
tndarray[j].treenodes = GENMALLOC(ndarray[j].entries * sizeof(struct rand_smc_node));
@@ -286,6 +291,7 @@ struct rand_smc_node *createsmctree(int *casz,
for (unsigned int i = 0U; (int)i < ndarray[j].entries; i++) {
tndarray[j].snames[i] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
strlcpy(tndarray[j].snames[i], ndarray[j].snames[i], MAX_NAME_CHARS);
+ tndarray[j].snameid[i] = ndarray[j].snameid[i];
tndarray[j].nname[i] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
strlcpy(tndarray[j].nname[i], ndarray[j].nname[i], MAX_NAME_CHARS);
tndarray[j].biases[i] = ndarray[j].biases[i];
@@ -303,6 +309,7 @@ struct rand_smc_node *createsmctree(int *casz,
}
tndarray[cntndarray].biases = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(int));
tndarray[cntndarray].snames = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(char *));
+ tndarray[cntndarray].snameid = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(int));
tndarray[cntndarray].norcall = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(int));
tndarray[cntndarray].nname = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(char *));
tndarray[cntndarray].treenodes = GENMALLOC(f3d.row[f3d.col + 1] * sizeof(struct rand_smc_node));
@@ -313,9 +320,11 @@ struct rand_smc_node *createsmctree(int *casz,
*/
int cntbias = 0;
int bias_count = 0;
+
for (unsigned int j = 0U; (int)j < f3d.row[f3d.col + 1]; j++) {
tndarray[cntndarray].snames[j] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
strlcpy(tndarray[cntndarray].snames[j], f3d.fnamefifo[f3d.col + 1][j], MAX_NAME_CHARS);
+ tndarray[cntndarray].snameid[j] = f3d.fidfifo[f3d.col + 1][j];
tndarray[cntndarray].nname[j] = GENMALLOC(1 * sizeof(char[MAX_NAME_CHARS]));
strlcpy(tndarray[cntndarray].nname[j], f3d.nnfifo[f3d.col + 1][j], MAX_NAME_CHARS);
tndarray[cntndarray].biases[j] = f3d.biasfifo[f3d.col + 1][j];
@@ -355,6 +364,7 @@ struct rand_smc_node *createsmctree(int *casz,
GENFREE(ndarray[j].norcall);
GENFREE(ndarray[j].biasarray);
GENFREE(ndarray[j].snames);
+ GENFREE(ndarray[j].snameid);
GENFREE(ndarray[j].nname);
GENFREE(ndarray[j].treenodes);
}
@@ -377,6 +387,7 @@ struct rand_smc_node *createsmctree(int *casz,
GENFREE(f3d.nnfifo[f3d.col + 1]);
GENFREE(f3d.fnamefifo[f3d.col + 1]);
GENFREE(f3d.biasfifo[f3d.col + 1]);
+ GENFREE(f3d.fidfifo[f3d.col + 1]);
f3d.curr_col -= 1;
}
}
@@ -393,86 +404,25 @@ struct rand_smc_node *createsmctree(int *casz,
GENFREE(f3d.nnfifo[i]);
GENFREE(f3d.fnamefifo[i]);
GENFREE(f3d.biasfifo[i]);
+ GENFREE(f3d.fidfifo[i]);
}
GENFREE(f3d.nnfifo);
GENFREE(f3d.fnamefifo);
GENFREE(f3d.biasfifo);
+ GENFREE(f3d.fidfifo);
GENFREE(f3d.row);
dtdone = 1;
}
}
-
*casz = cntndarray;
return ndarray;
}
/*
- * Running SMC call from what function name is selected
- */
-void runtestfunction(char *funcstr)
-{
- if (strcmp(funcstr, "sdei_version") == 0) {
- long long ret = sdei_version();
- if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
- tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_pe_unmask") == 0) {
- long long ret = sdei_pe_unmask();
- if (ret < 0) {
- tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_pe_mask") == 0) {
- int64_t ret = sdei_pe_mask();
- if (ret < 0) {
- tftf_testcase_printf("SDEI pe mask failed: 0x%llx\n", ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_event_status") == 0) {
- int64_t ret = sdei_event_status(0);
- if (ret < 0) {
- tftf_testcase_printf("SDEI event status failed: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_event_signal") == 0) {
- int64_t ret = sdei_event_signal(0);
- if (ret < 0) {
- tftf_testcase_printf("SDEI event signal failed: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_private_reset") == 0) {
- int64_t ret = sdei_private_reset();
- if (ret < 0) {
- tftf_testcase_printf("SDEI private reset failed: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
- if (strcmp(funcstr, "sdei_shared_reset") == 0) {
- int64_t ret = sdei_shared_reset();
- if (ret < 0) {
- tftf_testcase_printf("SDEI shared reset failed: 0x%llx\n",
- ret);
- }
- printf("running %s\n", funcstr);
- }
-}
-
-/*
- * Top of SMC fuzzing module
+ * Initialise the SMC fuzzing module: set up the allocator and build the bias tree.
*/
-test_result_t smc_fuzzing_top(void)
+test_result_t init_smc_fuzzing(void)
{
/*
* Setting up malloc block parameters
@@ -493,24 +443,27 @@ test_result_t smc_fuzzing_top(void)
tmod.checkadd = 1U;
tmod.checknumentries = 0U;
tmod.memerror = 0U;
- struct memmod *mmod;
mmod = &tmod;
- int cntndarray;
- struct rand_smc_node *tlnode;
/*
* Creating SMC bias tree
*/
- struct rand_smc_node *ndarray = createsmctree(&cntndarray, &tmod);
+ ndarray = createsmctree(&cntndarray, &tmod);
if (tmod.memerror != 0) {
return TEST_RESULT_FAIL;
}
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t smc_fuzzing_instance(uint32_t seed)
+{
+ struct rand_smc_node *tlnode;
/*
- * Hard coded seed, will change in the near future for better strategy
+ * Initialize pseudo random number generator with supplied seed.
*/
- srand(89758389);
+ srand(seed);
/*
* Code to traverse the bias tree and select function based on the biaes within
@@ -532,21 +485,27 @@ test_result_t smc_fuzzing_top(void)
* another loop to continue the process of selection until an eventual leaf
* node is found.
*/
- for (unsigned int i = 0U; i < 100U; i++) {
+ for (unsigned int i = 0U; i < SMC_FUZZ_CALLS_PER_INSTANCE; i++) {
tlnode = &ndarray[cntndarray - 1];
int nd = 0;
+
while (nd == 0) {
int nch = rand()%tlnode->biasent;
int selent = tlnode->biasarray[nch];
+
if (tlnode->norcall[selent] == 0) {
- runtestfunction(tlnode->snames[selent]);
+ runtestfunction(tlnode->snameid[selent]);
nd = 1;
} else {
tlnode = &tlnode->treenodes[selent];
}
}
}
+ return TEST_RESULT_SUCCESS;
+}
+test_result_t smc_fuzzing_deinit(void)
+{
/*
* End of test SMC selection and freeing of nodes
*/
@@ -560,6 +519,7 @@ test_result_t smc_fuzzing_top(void)
GENFREE(ndarray[j].norcall);
GENFREE(ndarray[j].biasarray);
GENFREE(ndarray[j].snames);
+ GENFREE(ndarray[j].snameid);
GENFREE(ndarray[j].nname);
GENFREE(ndarray[j].treenodes);
}
@@ -568,3 +528,99 @@ test_result_t smc_fuzzing_top(void)
return TEST_RESULT_SUCCESS;
}
+
+/*
+ * Top of SMC fuzzing module
+ */
+test_result_t smc_fuzzer_execute(void)
+{
+ /* These SMC_FUZZ_x macros are supplied by the build system. */
+ test_result_t results[SMC_FUZZ_INSTANCE_COUNT];
+ uint32_t seeds[SMC_FUZZ_INSTANCE_COUNT] = {SMC_FUZZ_SEEDS};
+ test_result_t result = TEST_RESULT_SUCCESS;
+ unsigned int i;
+
+ /* Run each instance. */
+ for (i = 0U; i < SMC_FUZZ_INSTANCE_COUNT; i++) {
+ printf("Starting SMC fuzz test with seed 0x%x\n", seeds[i]);
+ results[i] = smc_fuzzing_instance(seeds[i]);
+ }
+
+ /* Report successes and failures. */
+ printf("SMC Fuzz Test Results Summary\n");
+ for (i = 0U; i < SMC_FUZZ_INSTANCE_COUNT; i++) {
+ /* Display instance number. */
+ printf(" Instance #%d\n", i);
+
+ /* Print test results. */
+ printf(" Result: ");
+ if (results[i] == TEST_RESULT_SUCCESS) {
+ printf("SUCCESS\n");
+ } else if (results[i] == TEST_RESULT_FAIL) {
+ printf("FAIL\n");
+ /* If we got a failure, update the result value. */
+ result = TEST_RESULT_FAIL;
+ } else if (results[i] == TEST_RESULT_SKIPPED) {
+ printf("SKIPPED\n");
+ }
+
+ /* Print seed used */
+ printf(" Seed: 0x%x\n", seeds[i]);
+ }
+
+ /*
+ * Print out the smc fuzzer parameters so this test can be replicated.
+ */
+ printf("SMC fuzz build parameters to recreate this test:\n");
+ printf(" SMC_FUZZ_INSTANCE_COUNT=%u\n",
+ SMC_FUZZ_INSTANCE_COUNT);
+ printf(" SMC_FUZZ_CALLS_PER_INSTANCE=%u\n",
+ SMC_FUZZ_CALLS_PER_INSTANCE);
+ printf(" SMC_FUZZ_SEEDS=0x%x", seeds[0]);
+ for (i = 1U; i < SMC_FUZZ_INSTANCE_COUNT; i++) {
+ printf(",0x%x", seeds[i]);
+ }
+ printf("\n");
+
+ return result;
+}
+
+test_result_t smc_fuzzing_top(void)
+{
+ test_result_t result = TEST_RESULT_SUCCESS;
+ init_smc_fuzzing();
+
+#ifdef MULTI_CPU_SMC_FUZZER
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ int32_t aff_info __unused;
+ int64_t ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (lead_mpid == target_mpid) {
+ /* Run on this CPU */
+ if (smc_fuzzer_execute() != TEST_RESULT_SUCCESS)
+ return TEST_RESULT_FAIL;
+ } else {
+ /* Power on other CPU to run through fuzzing instructions */
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t) smc_fuzzer_execute, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long) target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+ }
+
+ smc_fuzzing_deinit();
+ return result;
+#else
+ result = smc_fuzzer_execute();
+ smc_fuzzing_deinit();
+ return result;
+#endif
+}
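The parameters printed by smc_fuzzer_execute() are sufficient to replicate a run because call selection depends only on the C library PRNG: seeding with the same value reproduces the same sequence of bias-array indices. A stand-alone illustration (not part of the module):

#include <stdio.h>
#include <stdlib.h>

static void replay_demo(unsigned int seed, unsigned int calls, int biasent)
{
	srand(seed);
	for (unsigned int i = 0U; i < calls; i++) {
		printf("call %u -> bias slot %d\n", i, rand() % biasent);
	}
}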
diff --git a/smc_fuzz/src/runtestfunction_helpers.c b/smc_fuzz/src/runtestfunction_helpers.c
new file mode 100644
index 000000000..c3b2cca7f
--- /dev/null
+++ b/smc_fuzz/src/runtestfunction_helpers.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sdei_fuzz_helper.h>
+#include <tsp_fuzz_helper.h>
+
+
+/*
+ * Invoke the SMC call based on the function id specified.
+ */
+void runtestfunction(int funcid)
+{
+ run_sdei_fuzz(funcid);
+ run_tsp_fuzz(funcid);
+}
diff --git a/smc_fuzz/src/sdei_fuzz_helper.c b/smc_fuzz/src/sdei_fuzz_helper.c
new file mode 100644
index 000000000..1d22335e5
--- /dev/null
+++ b/smc_fuzz/src/sdei_fuzz_helper.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <fuzz_names.h>
+#include <sdei_fuzz_helper.h>
+
+/*
+ * SDEI function that has no arguments
+ */
+void tftf_test_sdei_noarg(int64_t (*sdei_func)(void), char *funcstr)
+{
+ int64_t ret = (*sdei_func)();
+
+ if (ret < 0) {
+ tftf_testcase_printf("%s failed: 0x%llx\n", funcstr, ret);
+ }
+}
+
+/*
+ * SDEI function that has a single argument
+ */
+void tftf_test_sdei_singlearg(int64_t (*sdei_func)(uint64_t), char *funcstr)
+{
+ int64_t ret = (*sdei_func)(0);
+
+ if (ret < 0) {
+ tftf_testcase_printf("%s failed: 0x%llx\n", funcstr, ret);
+ }
+}
+
+/*
+ * SDEI function called from fuzzer
+ */
+void run_sdei_fuzz(int funcid)
+{
+ if (funcid == sdei_version_funcid) {
+ long long ret = sdei_version();
+
+ if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+ tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n",
+ ret);
+ }
+ } else if (funcid == sdei_pe_unmask_funcid) {
+ tftf_test_sdei_noarg(sdei_pe_unmask, "sdei_pe_unmask");
+ } else if (funcid == sdei_pe_mask_funcid) {
+ tftf_test_sdei_noarg(sdei_pe_mask, "sdei_pe_mask");
+ } else if (funcid == sdei_event_status_funcid) {
+ tftf_test_sdei_singlearg((int64_t (*)(uint64_t))sdei_event_status,
+ "sdei_event_status");
+ } else if (funcid == sdei_event_signal_funcid) {
+ tftf_test_sdei_singlearg(sdei_event_signal, "sdei_event_signal");
+ } else if (funcid == sdei_private_reset_funcid) {
+ tftf_test_sdei_noarg(sdei_private_reset, "sdei_private_reset");
+ } else if (funcid == sdei_shared_reset_funcid) {
+ tftf_test_sdei_noarg(sdei_shared_reset, "sdei_shared_reset");
+ }
+}
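Adding a new SDEI call to the fuzzer follows the same pattern: give it a node in the DTS so it receives a *_funcid, then add a branch to the dispatcher. A hedged sketch for a hypothetical sdei_event_unregister entry (the _funcid macro below is illustrative; a real one would come from fuzz_names.h, and sdei_event_unregister is assumed to be available from the TFTF SDEI library):

#ifndef sdei_event_unregister_funcid
#define sdei_event_unregister_funcid 0
#endif

static void run_sdei_fuzz_extra(int funcid)
{
	if (funcid == sdei_event_unregister_funcid) {
		tftf_test_sdei_singlearg(
			(int64_t (*)(uint64_t))sdei_event_unregister,
			"sdei_event_unregister");
	}
}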
diff --git a/smc_fuzz/src/tsp_fuzz_helper.c b/smc_fuzz/src/tsp_fuzz_helper.c
new file mode 100644
index 000000000..610fae023
--- /dev/null
+++ b/smc_fuzz/src/tsp_fuzz_helper.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <fuzz_names.h>
+#include <tsp_fuzz_helper.h>
+
+/*
+ * Generic TSP based function call for math operations
+ */
+void tftf_test_tsp_smc(uint64_t tsp_id, char *funcstr)
+{
+ uint64_t fn_identifier = TSP_FAST_FID(tsp_id);
+ uint64_t arg1 = 4;
+ uint64_t arg2 = 6;
+ smc_args tsp_svc_params = {fn_identifier, arg1, arg2};
+ smc_ret_values tsp_result;
+
+ tsp_result = tftf_smc(&tsp_svc_params);
+
+ if (tsp_result.ret0) {
+ tftf_testcase_printf("TSP operation 0x%x failed, error:0x%x\n",
+ (unsigned int) fn_identifier,
+ (unsigned int) tsp_result.ret0);
+ }
+}
+
+/*
+ * TSP function called from fuzzer
+ */
+void run_tsp_fuzz(int funcid)
+{
+ if (funcid == tsp_add_op_funcid) {
+ tftf_test_tsp_smc(TSP_ADD, "tsp_add_op");
+ } else if (funcid == tsp_sub_op_funcid) {
+ tftf_test_tsp_smc(TSP_SUB, "tsp_sub_op");
+ } else if (funcid == tsp_mul_op_funcid) {
+ tftf_test_tsp_smc(TSP_MUL, "tsp_mul_op");
+ } else if (funcid == tsp_div_op_funcid) {
+ tftf_test_tsp_smc(TSP_DIV, "tsp_div_op");
+ }
+}
diff --git a/spm/README.txt b/spm/README.txt
deleted file mode 100644
index a96a9fd61..000000000
--- a/spm/README.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-This directory provides sample Secure Partitions:
-
--Cactus is the main test Secure Partition run at S-EL1 on top of the S-EL2
-firmware. It complies with the FF-A 1.0 specification and provides sample
-ABI calls for setup and discovery, direct request/response messages, and
-memory sharing interfaces.
-
--Cactus-MM is a sample partition complying with the MM communication
-interface (not related to FF-A). It is run at S-EL0 on top of TF-A's
-SPM-MM implementation at EL3.
-
--Ivy and Quark are currently deprecated.
diff --git a/spm/cactus/aarch64/cactus_entrypoint.S b/spm/cactus/aarch64/cactus_entrypoint.S
index 154106337..17f07984f 100644
--- a/spm/cactus/aarch64/cactus_entrypoint.S
+++ b/spm/cactus/aarch64/cactus_entrypoint.S
@@ -1,13 +1,12 @@
/*
- * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
-#include <cactus_def.h>
-#include <cactus_platform_def.h>
+#include <sp_def.h>
.globl cactus_entrypoint
.globl secondary_cold_entry
@@ -15,20 +14,41 @@
/* Provision one stack per Execution Context (or vCPU) */
.section .bss.stacks
.balign CACHE_WRITEBACK_GRANULE
- .fill CACTUS_STACKS_SIZE * PLAT_CACTUS_CORE_COUNT
+ .fill SP_STACKS_SIZE * PLAT_SP_CORE_COUNT
stacks_end:
func cactus_entrypoint
/* Entry reason is primary EC cold boot */
mov x19, #1
+
+ /* Fall-through. */
+
secondary_cold_entry:
- /* Entry reason is secondary EC cold boot */
+ /*
+ * Entry reason is secondary EC cold boot (or primary EC cold
+ * boot from above).
+ */
+
+ /*
+ * x0 holds a pointer to the Boot Information Blob.
+ * Save it for later usage.
+ */
+ mov x20, x0
+
+ /* The SPMC passes the vCPU id in vMPIDR low bits. */
mrs x0, mpidr_el1
- bl platform_get_core_pos
+ bic x0, x0, #0x80000000
+
+ /*
+ * For legacy compatibility, the SPMC passes the physical core id through x4.
+ * For an MP SP, check that the physical core id matches the vCPU id.
+ */
+ cmp x4, x0
+ bne .
- /* Setup the stack pointer. */
+ /* Setup the stack pointer (from the linear id stored in x0). */
adr x1, stacks_end
- mov x2, #CACTUS_STACKS_SIZE
+ mov x2, #SP_STACKS_SIZE
mul x2, x0, x2
sub sp, x1, x2
@@ -71,11 +91,17 @@ secondary_cold_entry:
pie_fixup:
ldr x0, =pie_fixup
and x0, x0, #~(0x1000 - 1)
- mov x1, #CACTUS_IMAGE_SIZE
+ mov x1, #SP_IMAGE_SIZE
add x1, x1, x0
bl fixup_gdt_reloc
- /* Jump to the C entrypoint (it does not return) */
+ /*
+ * Jump to the C entrypoint (it does not return).
+ * Pass the cold boot reason and BIB address.
+ */
0: mov x0, x19
+ mov x1, x20
+
+ /* And jump to the C entrypoint. */
b cactus_main
endfunc cactus_entrypoint
diff --git a/spm/cactus/aarch64/cactus_exceptions.S b/spm/cactus/aarch64/cactus_exceptions.S
index 31cdbf9a5..06df31c93 100644
--- a/spm/cactus/aarch64/cactus_exceptions.S
+++ b/spm/cactus/aarch64/cactus_exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,6 +18,15 @@
end_vector_entry \name
.endm
+.macro interrupt_vector _type
+ sub sp, sp, #0x100
+ save_gp_regs
+ bl cactus_interrupt_handler_\_type
+ restore_gp_regs
+ add sp, sp, #0x100
+ eret
+.endm
+
vector_base cactus_vector
/*
@@ -31,7 +40,9 @@ unhandled_exception serr_sp0
/*
* Current EL with SPx : 0x200 - 0x400.
*/
-unhandled_exception sync_spx
+vector_entry sync_spx
+ b sync_exception_vector_entry
+end_vector_entry sync_spx
vector_entry irq_spx
b irq_vector_entry
@@ -98,22 +109,28 @@ unhandled_exception serr_a32
ldp x0, x1, [sp, #0x0]
.endm
-func irq_vector_entry
+func sync_exception_vector_entry
sub sp, sp, #0x100
save_gp_regs
- bl cactus_irq_handler
- restore_gp_regs
+ mov x19, sp
+ bl tftf_sync_exception_handler
+ cbnz x0, 0f
+ mov x0, x19
+ /* Save original stack pointer value on the stack */
+ add x1, x0, #0x100
+ str x1, [x0, #0xf8]
+ b print_exception
+0: restore_gp_regs
add sp, sp, #0x100
eret
+endfunc sync_exception_vector_entry
+
+func irq_vector_entry
+ interrupt_vector irq
endfunc irq_vector_entry
func fiq_vector_entry
- sub sp, sp, #0x100
- save_gp_regs
- bl cactus_fiq_handler
- restore_gp_regs
- add sp, sp, #0x100
- eret
+ interrupt_vector fiq
endfunc fiq_vector_entry
func crash_dump
diff --git a/spm/cactus/cactus.h b/spm/cactus/cactus.h
index cbf2dcb24..c7176c271 100644
--- a/spm/cactus/cactus.h
+++ b/spm/cactus/cactus.h
@@ -26,11 +26,4 @@ extern uintptr_t __BSS_START__, __BSS_END__;
#define CACTUS_BSS_START ((uintptr_t)&__BSS_START__)
#define CACTUS_BSS_END ((uintptr_t)&__BSS_END__)
-enum stdout_route {
- PL011_AS_STDOUT = 0,
- HVC_CALL_AS_STDOUT,
-};
-
-void set_putc_impl(enum stdout_route);
-
#endif /* __CACTUS_H__ */
diff --git a/spm/cactus/cactus.ld.S b/spm/cactus/cactus.ld.S
index 50fc576ee..afd72cbc6 100644
--- a/spm/cactus/cactus.ld.S
+++ b/spm/cactus/cactus.ld.S
@@ -1,10 +1,10 @@
/*
- * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2017-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <cactus_def.h>
+#include <sp_def.h>
#include <platform_def.h>
#include <xlat_tables_defs.h>
@@ -14,7 +14,7 @@ ENTRY(cactus_entrypoint)
SECTIONS
{
- . = CACTUS_IMAGE_BASE;
+ . = SP_IMAGE_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"TEXT_START address is not aligned to PAGE_SIZE.")
diff --git a/spm/cactus/cactus.mk b/spm/cactus/cactus.mk
index ae66c1de9..4e86e3d9e 100644
--- a/spm/cactus/cactus.mk
+++ b/spm/cactus/cactus.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -13,7 +13,8 @@ ifneq (${CACTUS_PLAT_PATH},)
include ${CACTUS_PLAT_PATH}/platform.mk
endif
-CACTUS_DTB := $(BUILD_PLAT)/cactus.dtb
+CACTUS_DTB := $(BUILD_PLAT)/cactus.dtb
+SECURE_PARTITIONS += cactus
CACTUS_INCLUDES := \
-Itftf/framework/include \
@@ -21,53 +22,61 @@ CACTUS_INCLUDES := \
-Iinclude/common \
-Iinclude/common/${ARCH} \
-Iinclude/lib \
+ -Iinclude/lib/extensions \
-Iinclude/lib/${ARCH} \
-Iinclude/lib/utils \
-Iinclude/lib/xlat_tables \
-Iinclude/plat/common \
-Iinclude/runtime_services \
-Ispm/cactus \
- -Ispm/common
+ -Ispm/common \
+ -Ispm/common/sp_tests
CACTUS_SOURCES := \
$(addprefix spm/cactus/, \
aarch64/cactus_entrypoint.S \
aarch64/cactus_exceptions.S \
- cactus_debug.c \
cactus_interrupt.c \
cactus_main.c \
) \
$(addprefix spm/common/, \
- aarch64/sp_arch_helpers.S \
+ sp_debug.c \
sp_helpers.c \
- spm_helpers.c \
+ spm_helpers.c \
+ ) \
+ $(addprefix spm/common/sp_tests/, \
+ sp_test_ffa.c \
) \
$(addprefix spm/cactus/cactus_tests/, \
cactus_message_loop.c \
- cactus_test_cpu_features.c \
+ cactus_test_simd.c \
cactus_test_direct_messaging.c \
- cactus_test_ffa.c \
cactus_test_interrupts.c \
cactus_test_memory_sharing.c \
cactus_tests_smmuv3.c \
+ cactus_test_notifications.c \
)
# TODO: Remove dependency on TFTF files.
-CACTUS_SOURCES += \
- tftf/framework/debug.c \
- tftf/framework/${ARCH}/asm_debug.S \
- tftf/tests/runtime_services/secure_service/ffa_helpers.c \
- tftf/tests/runtime_services/secure_service/spm_common.c \
+CACTUS_SOURCES += \
+ tftf/framework/debug.c \
+ tftf/framework/${ARCH}/asm_debug.S \
+ tftf/tests/runtime_services/secure_service/${ARCH}/ffa_arch_helpers.S \
+ tftf/tests/runtime_services/secure_service/ffa_helpers.c \
+ tftf/tests/runtime_services/secure_service/spm_common.c \
tftf/framework/${ARCH}/exception_report.c
CACTUS_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \
+ drivers/arm/sp805/sp805.c \
lib/${ARCH}/cache_helpers.S \
lib/${ARCH}/misc_helpers.S \
lib/smc/${ARCH}/asm_smc.S \
lib/smc/${ARCH}/smc.c \
lib/smc/${ARCH}/hvc.c \
+ lib/exceptions/${ARCH}/sync.c \
lib/locks/${ARCH}/spinlock.S \
lib/utils/mp_printf.c \
+ lib/extensions/fpu/fpu.c \
${XLAT_TABLES_LIB_SRCS}
CACTUS_LINKERFILE := spm/cactus/cactus.ld.S
@@ -80,9 +89,6 @@ $(eval $(call add_define,CACTUS_DEFINES,DEBUG))
$(eval $(call add_define,CACTUS_DEFINES,ENABLE_ASSERTIONS))
$(eval $(call add_define,CACTUS_DEFINES,ENABLE_BTI))
$(eval $(call add_define,CACTUS_DEFINES,ENABLE_PAUTH))
-$(eval $(call add_define,CACTUS_DEFINES,FVP_CLUSTER_COUNT))
-$(eval $(call add_define,CACTUS_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
-$(eval $(call add_define,CACTUS_DEFINES,FVP_MAX_PE_PER_CPU))
$(eval $(call add_define,CACTUS_DEFINES,LOG_LEVEL))
$(eval $(call add_define,CACTUS_DEFINES,PLAT_${PLAT}))
$(eval $(call add_define,CACTUS_DEFINES,PLAT_XLAT_TABLES_DYNAMIC))
@@ -91,14 +97,12 @@ $(CACTUS_DTB) : $(BUILD_PLAT)/cactus $(BUILD_PLAT)/cactus/cactus.elf
$(CACTUS_DTB) : $(CACTUS_DTS)
@echo " DTBGEN $@"
${Q}tools/generate_dtb/generate_dtb.sh \
- cactus ${CACTUS_DTS} $(BUILD_PLAT)
- ${Q}tools/generate_json/generate_json.sh \
- cactus $(BUILD_PLAT)
+ cactus ${CACTUS_DTS} $(BUILD_PLAT) $(CACTUS_DTB)
@echo
@echo "Built $@ successfully"
@echo
-cactus: $(CACTUS_DTB)
+cactus: $(CACTUS_DTB) SP_LAYOUT
# FDTS_CP copies flattened device tree sources
# $(1) = output directory
diff --git a/spm/cactus/cactus_def.h b/spm/cactus/cactus_def.h
deleted file mode 100644
index 190f06315..000000000
--- a/spm/cactus/cactus_def.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef CACTUS_DEF_H
-#define CACTUS_DEF_H
-
-#include <utils_def.h>
-
-/*
- * Layout of the Secure Partition image.
- */
-
-/* Up to 2 MiB at an arbitrary address that doesn't overlap the devices. */
-#define CACTUS_IMAGE_BASE ULL(0x1000)
-#define CACTUS_IMAGE_SIZE ULL(0x200000)
-
-/* Memory reserved for stacks */
-#define CACTUS_STACKS_SIZE ULL(0x1000)
-
-/*
- * RX/TX buffer used by VM's in SPM for memory sharing
- * Each VM allocated 2 pages, one for RX and one for TX buffer.
- */
-#define CACTUS_RX_BASE PLAT_CACTUS_RX_BASE
-#define CACTUS_TX_BASE CACTUS_RX_BASE + PAGE_SIZE
-#define CACTUS_RX_TX_SIZE PAGE_SIZE * 2
-
-/*
- * RX/TX buffer helpers.
- */
-#define get_sp_rx_start(sp_id) (CACTUS_RX_BASE + (((sp_id & 0x7FFFU) - 1U) * CACTUS_RX_TX_SIZE))
-#define get_sp_rx_end(sp_id) (CACTUS_RX_BASE + (((sp_id & 0x7FFFU) - 1U) * CACTUS_RX_TX_SIZE) + PAGE_SIZE)
-#define get_sp_tx_start(sp_id) (CACTUS_TX_BASE + (((sp_id & 0x7FFFU) - 1U) * CACTUS_RX_TX_SIZE))
-#define get_sp_tx_end(sp_id) (CACTUS_TX_BASE + (((sp_id & 0x7FFFU) - 1U) * CACTUS_RX_TX_SIZE) + PAGE_SIZE)
-
-#endif /* CACTUS_DEF_H */
diff --git a/spm/cactus/cactus_interrupt.c b/spm/cactus/cactus_interrupt.c
index 7de36cf0c..0a9035f75 100644
--- a/spm/cactus/cactus_interrupt.c
+++ b/spm/cactus/cactus_interrupt.c
@@ -1,51 +1,140 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <debug.h>
+#include "cactus_message_loop.h"
+#include "cactus_test_cmds.h"
+#include <drivers/arm/sp805.h>
#include <ffa_helpers.h>
#include <sp_helpers.h>
+#include "spm_common.h"
#include <spm_helpers.h>
-#include "cactus_test_cmds.h"
-#include "spm_common.h"
+#include <platform_def.h>
+
+#define NOTIFICATION_PENDING_INTERRUPT_INTID 5
+
+extern void notification_pending_interrupt_handler(void);
+
+extern ffa_id_t g_ffa_id;
+extern ffa_id_t g_dir_req_source_id;
+static uint32_t managed_exit_interrupt_id;
-extern ffa_vm_id_t g_ffa_id;
+/* Secure virtual interrupt that was last handled by Cactus SP. */
+uint32_t last_serviced_interrupt[PLATFORM_CORE_COUNT];
-static void managed_exit_handler(void)
+extern spinlock_t sp_handler_lock[NUM_VINT_ID];
+
+/*
+ * Managed exit ID discoverable by querying the SPMC through
+ * FFA_FEATURES API.
+ */
+void discover_managed_exit_interrupt_id(void)
{
+ struct ffa_value ffa_ret;
+
+ /* Interrupt ID value is returned through register W2. */
+ ffa_ret = ffa_features(FFA_FEATURE_MEI);
+ managed_exit_interrupt_id = ffa_feature_intid(ffa_ret);
+
+ VERBOSE("Discovered managed exit interrupt ID: %d\n",
+ managed_exit_interrupt_id);
+}
+
+/*
+ * Cactus SP does not implement application threads. Hence, once the Cactus SP
+ * sends the managed exit response to the direct request originator, execution
+ * is still frozen in interrupt handler context.
+ * Though it moves to the WAITING state, it cannot accept a new direct request
+ * message from any endpoint. It can only receive a direct request message with
+ * the command CACTUS_RESUME_AFTER_MANAGED_EXIT from the originator of the
+ * suspended direct request message in order to return from the interrupt
+ * handler context and resume the processing of suspended request.
+ */
+void send_managed_exit_response(void)
+{
+ struct ffa_value ffa_ret;
+ bool waiting_resume_after_managed_exit;
+
/*
- * Real SP will save its context here.
- * Send interrupt ID for acknowledgement
+ * A secure partition performs its housekeeping and sends a direct
+ * response to signal interrupt completion. This is a pure virtual
+ * interrupt, no need for deactivation.
*/
- cactus_response(g_ffa_id, HYP_ID, MANAGED_EXIT_INTERRUPT_ID);
+ ffa_ret = cactus_response(g_ffa_id, g_dir_req_source_id,
+ MANAGED_EXIT_INTERRUPT_ID);
+ waiting_resume_after_managed_exit = true;
+
+ while (waiting_resume_after_managed_exit) {
+
+ waiting_resume_after_managed_exit =
+ (ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC32 &&
+ ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64) ||
+ ffa_dir_msg_source(ffa_ret) != g_dir_req_source_id ||
+ cactus_get_cmd(ffa_ret) != CACTUS_RESUME_AFTER_MANAGED_EXIT;
+
+ if (waiting_resume_after_managed_exit) {
+ VERBOSE("Expected a direct message request from endpoint"
+ " %x with command CACTUS_RESUME_AFTER_MANAGED_EXIT\n",
+ g_dir_req_source_id);
+ ffa_ret = cactus_error_resp(g_ffa_id,
+ ffa_dir_msg_source(ffa_ret),
+ CACTUS_ERROR_TEST);
+ }
+ }
+ VERBOSE("Resuming the suspended command\n");
}
-int cactus_irq_handler(void)
+void register_maintenance_interrupt_handlers(void)
{
- uint32_t irq_num;
+ sp_register_interrupt_handler(send_managed_exit_response,
+ managed_exit_interrupt_id);
+ sp_register_interrupt_handler(notification_pending_interrupt_handler,
+ NOTIFICATION_PENDING_INTERRUPT_INTID);
+}
- irq_num = spm_interrupt_get();
+void cactus_interrupt_handler_irq(void)
+{
+ uint32_t intid = spm_interrupt_get();
+ unsigned int core_pos = get_current_core_id();
- ERROR("%s: Interrupt ID %u not handled!\n", __func__, irq_num);
+ last_serviced_interrupt[core_pos] = intid;
- return 0;
+ /* Invoke the handler registered by the SP. */
+ spin_lock(&sp_handler_lock[intid]);
+ if (sp_interrupt_handler[intid]) {
+ sp_interrupt_handler[intid]();
+ } else {
+ ERROR("%s: Interrupt ID %x not handled!\n", __func__, intid);
+ panic();
+ }
+ spin_unlock(&sp_handler_lock[intid]);
}
-int cactus_fiq_handler(void)
+void cactus_interrupt_handler_fiq(void)
{
- uint32_t fiq_num;
+ uint32_t intid = spm_interrupt_get();
+ unsigned int core_pos = get_current_core_id();
- fiq_num = spm_interrupt_get();
+ last_serviced_interrupt[core_pos] = intid;
- if (fiq_num == MANAGED_EXIT_INTERRUPT_ID) {
- managed_exit_handler();
+ if (intid == MANAGED_EXIT_INTERRUPT_ID) {
+ /*
+ * A secure partition performs its housekeeping and sends a
+ * direct response to signal interrupt completion.
+ * This is a pure virtual interrupt, no need for deactivation.
+ */
+ VERBOSE("vFIQ: Sending ME response to %x\n",
+ g_dir_req_source_id);
+ send_managed_exit_response();
} else {
- ERROR("%s: Interrupt ID %u not handled!\n", __func__, fiq_num);
+ /*
+ * Currently only managed exit interrupt is supported by vFIQ.
+ */
+ panic();
}
-
- return 0;
}
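The tail-end handler mechanism above invokes whatever a test registered for the incoming interrupt ID. A sketch of the registration side, as a Cactus test would do it (the interrupt ID and the sp805 call are examples; on the FVP the trusted watchdog is typically wired to interrupt 56 and the sp805 driver is pulled into the Cactus build):

#define EXAMPLE_TWDOG_INTID	56U

static void example_twdog_handler(void)
{
	/* Assumed available via drivers/arm/sp805.h in the Cactus build. */
	sp805_twdog_stop();
}

static void example_register_handler(void)
{
	sp_register_interrupt_handler(example_twdog_handler,
				      EXAMPLE_TWDOG_INTID);
}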
diff --git a/spm/cactus/cactus_main.c b/spm/cactus/cactus_main.c
index ff3f61871..b3f745182 100644
--- a/spm/cactus/cactus_main.c
+++ b/spm/cactus/cactus_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,22 +9,24 @@
#include <debug.h>
#include <cactus_message_loop.h>
-#include <cactus_platform_def.h>
#include <drivers/arm/pl011.h>
#include <drivers/console.h>
#include <lib/aarch64/arch_helpers.h>
#include <lib/tftf_lib.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
+
+#include <ffa_helpers.h>
#include <plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>
+#include <sp_debug.h>
#include <sp_helpers.h>
#include <spm_helpers.h>
#include <std_svc.h>
-#include "cactus_def.h"
-#include "cactus_tests.h"
+#include "sp_def.h"
+#include "sp_tests.h"
#include "cactus.h"
/* Host machine information injected by the build system in the ELF file. */
@@ -34,7 +36,7 @@ extern const char version_string[];
extern void secondary_cold_entry(void);
/* Global ffa_id */
-ffa_vm_id_t g_ffa_id;
+ffa_id_t g_ffa_id;
/*
*
@@ -45,10 +47,10 @@ ffa_vm_id_t g_ffa_id;
*
*/
-static void __dead2 message_loop(ffa_vm_id_t vm_id, struct mailbox_buffers *mb)
+static void __dead2 message_loop(ffa_id_t vm_id, struct mailbox_buffers *mb)
{
- smc_ret_values ffa_ret;
- ffa_vm_id_t destination;
+ struct ffa_value ffa_ret;
+ ffa_id_t destination;
/*
* This initial wait call is necessary to inform SPMD that
@@ -67,14 +69,31 @@ static void __dead2 message_loop(ffa_vm_id_t vm_id, struct mailbox_buffers *mb)
}
if (ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC32 &&
- ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64) {
+ ffa_func_id(ffa_ret) != FFA_MSG_SEND_DIRECT_REQ_SMC64 &&
+ ffa_func_id(ffa_ret) != FFA_INTERRUPT &&
+ ffa_func_id(ffa_ret) != FFA_RUN) {
ERROR("%s(%u) unknown func id 0x%x\n",
__func__, vm_id, ffa_func_id(ffa_ret));
break;
}
- destination = ffa_dir_msg_dest(ffa_ret);
+ if ((ffa_func_id(ffa_ret) == FFA_INTERRUPT) ||
+ (ffa_func_id(ffa_ret) == FFA_RUN)) {
+ /*
+ * Received FFA_INTERRUPT in waiting state.
+ * The interrupt id is passed although this is just
+ * informational as we're running with virtual
+ * interrupts unmasked and the interrupt is processed
+ * by the interrupt handler.
+ *
+ * Received FFA_RUN in waiting state, the endpoint
+ * simply returns by FFA_MSG_WAIT.
+ */
+ ffa_ret = ffa_msg_wait();
+ continue;
+ }
+ destination = ffa_dir_msg_dest(ffa_ret);
if (destination != vm_id) {
ERROR("%s(%u) invalid vm id 0x%x\n",
__func__, vm_id, destination);
@@ -96,6 +115,10 @@ static const mmap_region_t cactus_mmap[] __attribute__((used)) = {
/* scratch memory allocated to be used for running SMMU tests */
MAP_REGION_FLAT(PLAT_CACTUS_MEMCPY_BASE, PLAT_CACTUS_MEMCPY_RANGE,
MT_MEMORY | MT_RW),
+#if PLAT_fvp
+ MAP_REGION_FLAT(PLAT_CACTUS_NS_MEMCPY_BASE, PLAT_CACTUS_MEMCPY_RANGE,
+ MT_MEMORY | MT_RW | MT_NS),
+#endif
{0}
};
@@ -124,6 +147,45 @@ static void cactus_print_memory_layout(unsigned int vm_id)
(void *)get_sp_tx_end(vm_id));
}
+static void cactus_print_boot_info(struct ffa_boot_info_header *boot_info_header)
+{
+ struct ffa_boot_info_desc *boot_info_desc;
+
+ if (boot_info_header == NULL) {
+ NOTICE("SP doesn't have boot information!\n");
+ return;
+ }
+
+ VERBOSE("SP boot info:\n");
+ VERBOSE(" Signature: %x\n", boot_info_header->signature);
+ VERBOSE(" Version: %x\n", boot_info_header->version);
+ VERBOSE(" Blob Size: %u\n", boot_info_header->info_blob_size);
+ VERBOSE(" Descriptor Size: %u\n", boot_info_header->desc_size);
+ VERBOSE(" Descriptor Count: %u\n", boot_info_header->desc_count);
+
+ boot_info_desc = boot_info_header->boot_info;
+
+ if (boot_info_desc == NULL) {
+ ERROR("Boot data arguments error...\n");
+ return;
+ }
+
+ for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
+ VERBOSE(" Boot Data:\n");
+ VERBOSE(" Type: %u\n",
+ ffa_boot_info_type(&boot_info_desc[i]));
+ VERBOSE(" Type ID: %u\n",
+ ffa_boot_info_type_id(&boot_info_desc[i]));
+ VERBOSE(" Flags:\n");
+ VERBOSE(" Name Format: %x\n",
+ ffa_boot_info_name_format(&boot_info_desc[i]));
+ VERBOSE(" Content Format: %x\n",
+ ffa_boot_info_content_format(&boot_info_desc[i]));
+ VERBOSE(" Size: %u\n", boot_info_desc[i].size);
+ VERBOSE(" Value: %llx\n", boot_info_desc[i].content);
+ }
+}
+
static void cactus_plat_configure_mmu(unsigned int vm_id)
{
mmap_add_region(CACTUS_TEXT_START,
@@ -145,44 +207,39 @@ static void cactus_plat_configure_mmu(unsigned int vm_id)
mmap_add_region(get_sp_rx_start(vm_id),
get_sp_rx_start(vm_id),
- (CACTUS_RX_TX_SIZE / 2),
+ (SP_RX_TX_SIZE / 2),
MT_RO_DATA);
mmap_add_region(get_sp_tx_start(vm_id),
get_sp_tx_start(vm_id),
- (CACTUS_RX_TX_SIZE / 2),
+ (SP_RX_TX_SIZE / 2),
MT_RW_DATA);
mmap_add(cactus_mmap);
init_xlat_tables();
}
-static void register_secondary_entrypoint(void)
+static struct ffa_value register_secondary_entrypoint(void)
{
- smc_args args;
+ struct ffa_value args;
args.fid = FFA_SECONDARY_EP_REGISTER_SMC64;
args.arg1 = (u_register_t)&secondary_cold_entry;
- tftf_smc(&args);
+ return ffa_service_call(&args);
}
-int tftf_irq_handler_dispatcher(void)
-{
- ERROR("%s\n", __func__);
-
- return 0;
-}
-
-void __dead2 cactus_main(bool primary_cold_boot)
+void __dead2 cactus_main(bool primary_cold_boot,
+ struct ffa_boot_info_header *boot_info_header)
{
assert(IS_IN_EL1() != 0);
struct mailbox_buffers mb;
+ struct ffa_value ret;
/* Get current FFA id */
- smc_ret_values ffa_id_ret = ffa_id_get();
- ffa_vm_id_t ffa_id = (ffa_vm_id_t)(ffa_id_ret.ret2 & 0xffff);
+ struct ffa_value ffa_id_ret = ffa_id_get();
+ ffa_id_t ffa_id = ffa_endpoint_id(ffa_id_ret);
if (ffa_func_id(ffa_id_ret) != FFA_SUCCESS_SMC32) {
ERROR("FFA_ID_GET failed.\n");
panic();
@@ -198,6 +255,23 @@ void __dead2 cactus_main(bool primary_cold_boot)
/* Configure and enable Stage-1 MMU, enable D-Cache */
cactus_plat_configure_mmu(ffa_id);
+
+ /* Initialize locks for tail end interrupt handler */
+ sp_handler_spin_lock_init();
+
+ if (boot_info_header != NULL) {
+ /*
+ * TODO: Currently just validating that cactus can
+ * access the boot info descriptors. In case we want to
+ * use the boot info contents, we should check the
+ * blob and remap if the size is bigger than one page.
+ * Only then access the contents.
+ */
+ mmap_add_dynamic_region(
+ (unsigned long long)boot_info_header,
+ (uintptr_t)boot_info_header,
+ PAGE_SIZE, MT_RO_DATA);
+ }
}
/*
@@ -216,38 +290,41 @@ void __dead2 cactus_main(bool primary_cold_boot)
goto msg_loop;
}
- if (ffa_id == SPM_VM_ID_FIRST) {
- console_init(CACTUS_PL011_UART_BASE,
- CACTUS_PL011_UART_CLK_IN_HZ,
- PL011_BAUDRATE);
+ set_putc_impl(FFA_SVC_SMC_CALL_AS_STDOUT);
- set_putc_impl(PL011_AS_STDOUT);
+ /* Below string is monitored by CI expect script. */
+ NOTICE("Booting Secure Partition (ID: %x)\n%s\n%s\n",
+ ffa_id, build_message, version_string);
- NOTICE("Booting Primary Cactus Secure Partition\n%s\n%s\n",
- build_message, version_string);
- } else {
- smc_ret_values ret;
- set_putc_impl(HVC_CALL_AS_STDOUT);
-
- NOTICE("Booting Secondary Cactus Secure Partition (ID: %x)\n%s\n%s\n",
- ffa_id, build_message, version_string);
-
- if (ffa_id == (SPM_VM_ID_FIRST + 2)) {
- VERBOSE("Mapping RXTX Region\n");
- CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
- if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
- ERROR(
- "Failed to map RXTX buffers. Error: %x\n",
- ffa_error_code(ret));
- panic();
- }
+ if (ffa_id == SP_ID(1)) {
+ cactus_print_boot_info(boot_info_header);
+ }
+
+ if (ffa_id == (SPM_VM_ID_FIRST + 2)) {
+ VERBOSE("Mapping RXTX Region\n");
+ CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR(
+ "Failed to map RXTX buffers. Error: %x\n",
+ ffa_error_code(ret));
+ panic();
}
}
- INFO("FF-A id: %x\n", ffa_id);
cactus_print_memory_layout(ffa_id);
- register_secondary_entrypoint();
+ ret = register_secondary_entrypoint();
+
+ /* FFA_SECONDARY_EP_REGISTER interface is not supported for UP SP. */
+ if (ffa_id == (SPM_VM_ID_FIRST + 2)) {
+ expect(ffa_func_id(ret), FFA_ERROR);
+ expect(ffa_error_code(ret), FFA_ERROR_NOT_SUPPORTED);
+ } else {
+ expect(ffa_func_id(ret), FFA_SUCCESS_SMC32);
+ }
+
+ discover_managed_exit_interrupt_id();
+ register_maintenance_interrupt_handlers();
/* Invoking Tests */
ffa_tests(&mb);
diff --git a/spm/cactus/cactus_tests.h b/spm/cactus/cactus_tests.h
deleted file mode 100644
index 1039ba5d7..000000000
--- a/spm/cactus/cactus_tests.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef CACTUS_TESTS_H
-#define CACTUS_TESTS_H
-
-#include <spm_common.h>
-
-/*
- * Test functions
- */
-
-void ffa_tests(struct mailbox_buffers *mb);
-
-/*
- * Test other things like the version number returned by SPM.
- */
-void misc_tests(void);
-
-/*
- * The Arm TF is responsible for setting up system registers on behalf of the
- * Secure Partition. For example, TF is supposed to allow Secure Partitions to
- * perform cache maintenance operations (by setting the SCTLR_EL1.UCI bit).
- *
- * This function attempts to verify that we indeed have access to these system
- * features from S-EL0. These tests report their results on the UART. They do
- * not recover from a failure : when an error is encountered they will most
- * likely trigger an exception into S-EL1.
- */
-void system_setup_tests(void);
-
-/*
- * Exercise the SP_MEMORY_ATTRIBUTES_SET_AARCH64 SMC interface. A variety of
- * valid and invalid requests to change memory attributes are tested.
- *
- * These tests report their results on the UART. They do not recover from a
- * failure : when an error is encountered they endlessly loop.
- */
-void mem_attr_changes_tests(void);
-
-#endif /* CACTUS_TESTS_H */
diff --git a/spm/cactus/cactus_tests/SMMUv3TestEngine.h b/spm/cactus/cactus_tests/SMMUv3TestEngine.h
new file mode 100644
index 000000000..b168d50d5
--- /dev/null
+++ b/spm/cactus/cactus_tests/SMMUv3TestEngine.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2015-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* -*- C -*-
+ *
+ * Copyright 2015 ARM Limited. All rights reserved.
+ */
+
+#ifndef ARM_INCLUDE_SMMUv3TestEngine_h
+#define ARM_INCLUDE_SMMUv3TestEngine_h
+
+#include <inttypes.h>
+
+///
+/// Notes on interfacing to PCIe
+/// ----------------------------
+///
+/// MSIAddress and MSIData are held in the MSI Table that is found by a BAR.
+///
+/// So if operating under PCIe then MSIAddress should be '1' and MSIData is
+/// interpreted as the vector to use (0..2048). If MSIAddress is not '0' or '1'
+/// then the frame is misconfigured.
+///
+/// StreamID is not run-time assignable as it is an attribute of the topology of
+/// the system.
+///
+/// In PCIe, then we need multiple instances of the engine and it shall occupy
+/// one Function.
+///
+/// Each BAR is 64 bits so the three BARs are:
+/// * BAR0 is going to point to a set of register frames, at least 128 KiB
+/// * BAR1/2 are MSI-X vector/pending bit array (PBA).
+///
+
+
+///
+/// The engine consists of a series of contiguous pairs of 64 KiB pages, each
+/// page consists of a series of frames. The frames in the first page (User
+/// Page) are expected to be able to be exposed to a low privileged piece of SW,
+/// whilst the second page (Privileged Page) is expected to be controlled by a
+/// higher level of SW.
+///
+/// Examples:
+/// 1) User Page controlled by EL1
+/// Privileged Page controlled by EL2
+/// 2) User Page controlled by EL0
+/// Privileged Page controlled by EL1
+///
+/// The engine can have an unlimited number of pairs.
+///
+/// Each pair of pages are full of register frames. The frames are the same
+/// size in both and frame N in the User page corresponds to frame N in the
+/// Privileged page.
+///
+/// The work load is setup by filling out all the non-cmd fields and then
+/// writing to cmd the command code. If Device-nGnR(n)E is used then no
+/// explicit barrier instruction is required.
+///
+/// When the work has finished then the engine sets cmd to ENGINE_HALTED or
+/// ENGINE_ERROR depending on if the engine encountered an error.
+///
+/// If the command was run then an MSI will be generated if msiaddress != 0,
+/// independent of if there was an error or not. If the MSI abort then
+/// uctrl.MSI_ABORTED is set.
+///
+/// If the frame/command was invalid for some reason then no MSI will be
+/// generated under the assumption that it can't trust the msiaddress field and
+/// ENGINE_FRAME_MISCONFIGURED is read out of cmd. Thus the user should write
+/// the command and then immediately read to see if it is in the
+/// ENGINE_FRAME_MISCONFIGURED state. It is guaranteed that a read of cmd
+/// after writing cmd will immediately return ENGINE_FRAME_MISCONFIGURED if the
+/// command was invalid.
+///
+/// If the engine is not in the ENGINE_HALTED, ENGINE_ERROR or
+/// ENGINE_FRAME_MISCONFIGURED state then any writes are ignored.
+///
+/// As this is a model-only device, the error diagnostics are crude, as it is
+/// expected that a verbose error trace stream will come from the model!
+///
+/// Most of the work-loads can be seeded to do work in a random order with
+/// random transaction sizes. The exact specification of the order and
+/// transaction size is TBD. It is intended that the algorithm used is
+/// specified so that you can work out the order that it should be done in.
+///
+/// The device can issue multiple outstanding transactions for each work-load.
+///
+/// The device will accept any size access for all fields except for cmd.
+///
+/// If a single burst access crosses the boundary of a user_frame the result is
+/// UNPREDICTABLE. From a programmer's perspective, you can use any pattern of
+/// writes within the same frame. However, you should only write to cmd
+/// separately, with a single 32 bit access.
+///
+/// Whilst running, the whole frame is write-ignored and the unspecified values
+/// of udata and pdata are UNKNOWN.
+///
+/// The begin, end_incl, stride and seed are interpreted as follows:
+///
+/// * if [begin & ~7ull, end_incl | 7ull] == [0, ~0ull], ENGINE_FRAME_MISCONFIGURED
+/// * such a huge range is not supported for any stride!
+/// * stride == 0, ENGINE_FRAME_MISCONFIGURED
+/// * stride == 1, then the range operated on is [begin, end_incl]
+/// * stride is a multiple of 8
+/// * single 64 bit transfers are performed
+/// * the addresses used are:
+/// (begin & ~7ull) + n * stride for n = 0..N
+/// where the last byte accessed is <= (end_incl | 7)
+/// * for any other value of stride, ENGINE_FRAME_MISCONFIGURED
+/// * if stride > max(8, end_incl - begin + 1) then only a single
+/// element is transferred.
+/// * seed == 0 then the sequence of operation is n = 0, 1, 2, .. N
+/// though multiple in flight transactions could alter this order.
+/// * seed == ~0u then the sequence is n = N, N-1, N-2, .. 0
+/// * seed anything else then sequence randomly pulls one off the front
+/// or the back of the range.
+///
+/// The random number generator R is defined as:
+inline uint32_t testengine_random(uint64_t* storage_)
+{
+ *storage_ = (
+ *storage_ * 0x0005deecE66Dull + 0xB
+ ) & 0xffffFFFFffffull;
+ uint32_t const t = uint32_t((*storage_ >> 17 /* NOTE */) & 0x7FFFffff);
+
+ //
+ // Construct the topmost bit by running the generator again and
+ // choosing a bit from somewhere
+ //
+ *storage_ = (
+ *storage_ * 0x0005deecE66Dull + 0xB
+ ) & 0xffffFFFFffffull;
+ uint32_t const ret = uint32_t(t | (*storage_ & 0x80000000ull));
+ return ret;
+}
+
+// Seeding storage from the 'seed' field is:
+inline void testengine_random_seed_storage(uint64_t* storage_, uint32_t seed_)
+{
+ *storage_ = uint64_t(seed_) << 16 | 0x330e;
+}
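+
+///
+/// Illustrative only: a minimal sketch of how software might regenerate the
+/// expected fill pattern of one 4 KiB-aligned page written by ENGINE_RAND48
+/// with stride == 1, assuming the per-transaction seeding described later in
+/// this file (seed ^ (address >> 32) ^ (address & 0xFFFFffff)). 'page_base'
+/// and 'frame_seed' are hypothetical helper parameters, not engine fields.
+///
+inline void testengine_expected_rand48_page(uint8_t* page_base, uint32_t frame_seed)
+{
+    uint64_t storage;
+    uint64_t const addr = (uint64_t)(uintptr_t)page_base;
+
+    // Reseed once at the 4 KiB boundary, as the engine does.
+    testengine_random_seed_storage(
+        &storage,
+        frame_seed ^ (uint32_t)(addr >> 32) ^ (uint32_t)addr
+        );
+
+    // Each byte takes the low 8 bits of the next generator output.
+    for (unsigned i = 0; i != 4096; ++i)
+        page_base[i] = (uint8_t)testengine_random(&storage);
+}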
+
+
+/// 128 bytes
+struct user_frame_t
+{
+ // -- 0 --
+ uint32_t cmd;
+ uint32_t uctrl;
+
+ // -- 1 --
+ // These keep track of how much work is being done by the engine.
+ uint32_t count_of_transactions_launched;
+ uint32_t count_of_transactions_returned;
+
+ // -- 2 --
+ // If operating under PCIe then msiaddress should be either 1 (send MSI-X)
+ // or 0 (don't send). The MSI-X to send is in msidata.
+ uint64_t msiaddress;
+
+ // -- 3 --
+ // If operating under PCIe then msidata is the MSI-X index in the MSI-X
+ // vector table to send (0..2047)
+ //
+ // If operating under PCIe then msiattr has no effect.
+ uint32_t msidata;
+	uint32_t msiattr; // encoded the same as the bottom half of the attributes field
+
+ //
+ // source and destination attributes, including NS attributes if SSD-s
+ // Includes 'instruction' attributes so the work load can look like
+ // instruction accesses.
+ //
+ // Each halfword encodes:
+ // 15:14 shareability 0..2 (nsh/ish/osh) (ACE encoding), ignored if a device type
+ // 13 outer transient, ignored unless outer ACACHE is cacheable
+ // 12 inner transient, ignored unless inner ACACHE is cacheable
+ // 10:8 APROT (AMBA encoding)
+ // 10 InD -- Instruction not Data
+ // 9 NS -- Non-secure
+ // 8 PnU -- Privileged not User
+ // 7:4 ACACHE encoding of outer
+ // 3:0 if 7:4 == {0,1}
+ // // Device type
+ // 3 Gathering if ACACHE is 1, ignored otherwise
+ // 2 Reordering if ACACHE is 1, ignored otherwise
+ // else
+ // // Normal type
+ // ACACHE encoding of inner
+ //
+ // ACACHE encodings:
+ // 0000 -- Device-nGnRnE
+ // 0001 -- Device-(n)G(n)RE -- depending on bits [3:2]
+ // 0010 -- NC-NB (normal non-cacheable non-bufferable)
+ // 0011 -- NC
+ // 0100 -- illegal
+ // 0101 -- illegal
+ // 0110 -- raWT
+ // 0111 -- raWB
+ // 1000 -- illegal
+ // 1001 -- illegal
+ // 1010 -- waWT
+ // 1011 -- waWB
+ // 1100 -- illegal
+ // 1101 -- illegal
+ // 1110 -- rawaWT
+ // 1111 -- rawaWB
+ //
+	// NOTE that the meaning of the ACACHE encodings depends on whether it is a
+	// read or a write. AMBA can't directly encode the 'no-allocate cacheable'
+ // and you have to set the 'other' allocation hint. So for example, a read
+ // naWB has to be encoded as waWB. A write naWB has to be encoded as raWB,
+ // etc.
+ //
+ // Lowest halfword are 'source' attributes.
+ // Highest halfword are 'destination' attributes.
+ //
+	// NOTE that you can make a non-secure stream output a secure transaction
+ // -- the SMMU should sort it out.
+ //
+
+ // -- 4 --
+ // Under PCIe then a real Function does not have control over the attributes
+ // of the transactions that it makes. However, for testing purposes of the
+ // SMMU then we allow its attributes to be specified (and magically
+ // transport them over PCIe).
+ uint32_t attributes;
+ uint32_t seed;
+
+ // -- 5 --
+ uint64_t begin;
+ // -- 6 --
+ uint64_t end_incl;
+
+ // -- 7 --
+ uint64_t stride;
+
+ // -- 8 --
+ uint64_t udata[8];
+};
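+
+//
+// Illustrative only: one possible 'attributes' value built from the encoding
+// described above, with identical source and destination halfwords:
+// inner-shareable ([15:14] = 0b01), Non-secure privileged data access
+// ([10:8] = 0b011), outer and inner rawaWB ([7:4] = [3:0] = 0b1111),
+// giving halfword 0x43FF.
+//
+#define TESTENGINE_EXAMPLE_ATTR_HALFWORD  0x43FFu
+#define TESTENGINE_EXAMPLE_ATTRIBUTES \
+    ((TESTENGINE_EXAMPLE_ATTR_HALFWORD << 16) | TESTENGINE_EXAMPLE_ATTR_HALFWORD)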
+
+// 128 bytes
+struct privileged_frame_t
+{
+ // -- 0 --
+ uint32_t pctrl;
+ uint32_t downstream_port_index; // [0,64), under PCIe only use port 0
+
+ // -- 1 --
+ // Under PCIe, then streamid is ignored.
+ uint32_t streamid;
+ uint32_t substreamid; // ~0u means no substreamid, otherwise must be a 20 bit number or ENGINE_FRAME_MISCONFIGURED
+
+ // -- 2 --
+ uint64_t pdata[14];
+};
+
+// 128 KiB
+struct engine_pair_t
+{
+ user_frame_t user[ 64 * 1024 / sizeof(user_frame_t)];
+ privileged_frame_t privileged[ 64 * 1024 / sizeof(privileged_frame_t)];
+};
+
+//
+// NOTE that we don't have a command that does some writes then some reads. For
+// the ACK this is probably not going to be much of a problem.
+//
+// On completion, an MSI will be sent if the msiaddress != 0.
+//
+enum cmd_t
+{
+ // ORDER IS IMPORTANT, see predicates later in this file.
+
+ // The frame was misconfigured.
+ ENGINE_FRAME_MISCONFIGURED = ~0u - 1,
+
+ // The engine encountered an error (downstream transaction aborted).
+ ENGINE_ERROR = ~0u,
+
+ // This frame is unimplemented or in use by the secure world.
+ //
+ // A user _can_ write this to cmd and it will be considered to be
+ // ENGINE_HALTED.
+ ENGINE_NO_FRAME = 0,
+
+ // The engine is halted.
+ ENGINE_HALTED = 1,
+
+ // The engine memcpy's from region [begin, end_incl] to address udata[0].
+ //
+ // If stride is 0 then ENGINE_ERROR is produced, udata[2] contains the error
+ // address. No MSI is generated.
+ //
+ // If stride is 1 then this is a normal memcpy(). If stride is larger then
+ // not all the data will be copied.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed. If seed is:
+ // 0 -- do them from lowest address to highest address
+ // ~0u -- do them in reverse order
+ // otherwise use the value as a seed to do them in random order
+ // The ability to do them in a non-random order means that we stand a
+ // chance of getting merged event records.
+ //
+ // This models a work-load where we start with some reads and then do some
+ // writes.
+ ENGINE_MEMCPY = 2,
+
+ // The engine randomizes region [begin, end_incl] using rand48, seeded
+ // with seed and using the specified stride.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed.
+ //
+ // The seed is used to create a random number generator that is used to
+ // choose the direction.
+ //
+ // A separate random number generator per transaction is then used based on
+ // seed and the address:
+ //
+ // seed_per_transaction = seed ^ (address >> 32) ^ (address & 0xFFFFffff);
+ //
+ // This seed is then used to seed a random number generator to fill the
+ // required space. The data used should be:
+	//      uint64_t storage;
+	//      for (uint8_t* p = (uint8_t*)begin; p <= (uint8_t*)end_incl; ++ p)
+	//      {
+	//          // When we cross a 4 KiB boundary we reseed.
+	//          if (((uintptr_t)p & 0xFFF) == 0 || p == (uint8_t*)begin)
+	//          {
+	//              testengine_random_seed_storage(
+	//                  &storage,
+	//                  seed ^ (uint32_t)((uint64_t)(uintptr_t)p >> 32) ^ (uint32_t)(uintptr_t)p
+	//                  );
+	//          }
+	//          assert( *p == (uint8_t)testengine_random(&storage) );
+	//      }
+ // This isn't the most efficient way of doing it as it throws away a lot of
+ // entropy from the call to testengine_random() but then we aren't aiming for
+ // good random numbers.
+ //
+	// If stride is 0 then ENGINE_ERROR is produced, udata[2] contains the error
+	// address. (NOTE that udata[1] is not used).
+ //
+ // If stride is 1 then this fills the entire buffer. If stride is larger
+ // then not all the data will be randomized.
+ //
+ // This models a write-only work-load.
+ ENGINE_RAND48 = 3,
+
+ // The engine reads [begin, end_incl], treats the region as a set of
+ // uint64_t and sums them, delivering the result to udata[1], using the
+ // specified stride.
+ //
+ // If stride is 0 then ENGINE_ERROR is produced, udata[2] is the error
+ // address.
+ //
+ // If stride is 1 then this sums the entire buffer. If stride is larger
+ // then not all the data will be summed.
+ //
+ // The order and size of the transactions used are determined randomly using
+ // seed.
+ //
+ // The begin must be 64 bit aligned (begin & 7) == 0 and the end_incl must
+	// end at the end of a 64 bit quantity (end_incl & 7) == 7, otherwise
+ // ENGINE_FRAME_MISCONFIGURED is generated.
+ //
+ // This models a read-only work-load.
+ ENGINE_SUM64 = 4
+};
+
+static inline bool is_valid_and_running(cmd_t t_)
+{
+ unsigned const t = t_; // compensate for bad MSVC treating t_ as signed!
+ return ENGINE_MEMCPY <= t && t <= ENGINE_SUM64;
+}
+
+static inline bool is_in_error_state(cmd_t t_)
+{
+ return t_ == ENGINE_ERROR || t_ == ENGINE_FRAME_MISCONFIGURED;
+}
+
+static inline bool is_in_error_or_stopped_state(cmd_t t_)
+{
+ return t_ == ENGINE_NO_FRAME
+ || t_ == ENGINE_HALTED
+ || is_in_error_state(t_);
+}
+
+static inline bool is_invalid(cmd_t t_)
+{
+ unsigned const t = t_; // compensate for bad MSVC treating t_ as signed!
+ return ENGINE_SUM64 < t && t < ENGINE_FRAME_MISCONFIGURED;
+}
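+
+//
+// Illustrative only: a minimal sketch of programming one user frame for
+// ENGINE_MEMCPY following the protocol described above (fill the non-cmd
+// fields, write cmd last, read cmd back to detect ENGINE_FRAME_MISCONFIGURED,
+// then poll until the engine stops). It assumes the corresponding privileged
+// frame (streamid, etc.) has already been set up; 'frame', 'src', 'dst',
+// 'len' and 'attributes' are hypothetical parameters, not engine registers.
+//
+static inline bool testengine_frame_memcpy(volatile user_frame_t* frame,
+                                           uint64_t src, uint64_t dst,
+                                           uint64_t len, uint32_t attributes)
+{
+    frame->uctrl      = 0;
+    frame->msiaddress = 0;          /* no MSI on completion */
+    frame->attributes = attributes;
+    frame->seed       = 0;          /* lowest-to-highest order */
+    frame->begin      = src;
+    frame->end_incl   = src + len - 1;
+    frame->stride     = 1;
+    frame->udata[0]   = dst;        /* destination base for MEMCPY */
+
+    frame->cmd = ENGINE_MEMCPY;     /* command is written last */
+
+    if (frame->cmd == ENGINE_FRAME_MISCONFIGURED)
+        return false;
+
+    /* Spin until the engine halts or reports an error. */
+    while (!is_in_error_or_stopped_state((cmd_t)frame->cmd))
+        ;
+
+    return frame->cmd == ENGINE_HALTED;
+}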
+
+/// pctrl has layout
+///
+/// 0 -- SSD_NS -- the stream and frame is non-secure
+/// -- note that if this is zero then it means the
+/// frame is controlled by secure SW and non-secure
+/// accesses are RAZ/WI (and so see ENGINE_NO_FRAME)
+/// Secure SW can only generate secure SSD StreamIDs
+/// This could be relaxed in the future if people need
+/// to.
+///
+/// 8 -- ATS_ENABLE -- CURRENTLY HAS NO EFFECT
+/// 9 -- PRI_ENABLE -- CURRENTLY HAS NO EFFECT
+///
+/// SSD_NS can only be altered by a secure access. Once clear then the
+/// corresponding user and privileged frames are accessible only to secure
+/// accesses. Non-secure accesses are RAZ/WI (and hence cmd will be
+/// ENGINE_NO_FRAME to non-secure accesses).
+///
+/// ATS_ENABLE/PRI_ENABLE are not currently implemented and their intent is for
+/// per-substreamid ATS/PRI support.
+///
+/// However, ATS/PRI support for the whole StreamID is advertised through the
+/// PCIe Extended Capabilities Header.
+///
+
+/// uctrl has layout
+///
+/// 0 -- MSI_ABORTED -- an MSI aborted (set by the engine)
+///
+/// 16-31 -- RATE -- some ill-defined metric for how fast to do the work!
+///
+
+#endif
diff --git a/spm/cactus/cactus_tests/cactus_message_loop.c b/spm/cactus/cactus_tests/cactus_message_loop.c
index fde707443..c0abf2b08 100644
--- a/spm/cactus/cactus_tests/cactus_message_loop.c
+++ b/spm/cactus/cactus_tests/cactus_message_loop.c
@@ -1,14 +1,24 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include "cactus_message_loop.h"
-#include "cactus_test_cmds.h"
-#include <ffa_helpers.h>
#include <debug.h>
+#include <cactus_message_loop.h>
+#include <cactus_test_cmds.h>
+#include <ffa_helpers.h>
+#include <events.h>
+#include <platform.h>
+#include <spm_helpers.h>
+
+/**
+ * Counter of the number of handled requests, for each CPU. The number of
+ * requests can be accessed from another Cactus SP, or from the normal world
+ * using a special test command.
+ */
+static uint32_t requests_counter[PLATFORM_CORE_COUNT];
/**
* Begin and end of command handler table, respectively. Both symbols defined by
@@ -19,23 +29,35 @@ extern struct cactus_cmd_handler cactus_cmd_handler_end[];
#define PRINT_CMD(smc_ret) \
VERBOSE("cmd %lx; args: %lx, %lx, %lx, %lx\n", \
- smc_ret.ret3, smc_ret.ret4, smc_ret.ret5, \
- smc_ret.ret6, smc_ret.ret7)
+ smc_ret.arg3, smc_ret.arg4, smc_ret.arg5, \
+ smc_ret.arg6, smc_ret.arg7)
+
+/* Global FFA_MSG_DIRECT_REQ source ID */
+ffa_id_t g_dir_req_source_id;
/**
* Traverses command table from section ".cactus_handler", searches for a
* registered command and invokes the respective handler.
*/
-bool cactus_handle_cmd(smc_ret_values *cmd_args, smc_ret_values *ret,
+bool cactus_handle_cmd(struct ffa_value *cmd_args, struct ffa_value *ret,
struct mailbox_buffers *mb)
{
uint64_t in_cmd;
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos = spm_get_my_core_pos();
+
if (cmd_args == NULL || ret == NULL) {
- ERROR("Invalid argumentos passed to %s!\n", __func__);
+ ERROR("Invalid arguments passed to %s!\n", __func__);
return false;
}
+ /* Get the source of the Direct Request message. */
+ if (ffa_func_id(*cmd_args) == FFA_MSG_SEND_DIRECT_REQ_SMC32 ||
+ ffa_func_id(*cmd_args) == FFA_MSG_SEND_DIRECT_REQ_SMC64) {
+ g_dir_req_source_id = ffa_dir_msg_source(*cmd_args);
+ }
+
PRINT_CMD((*cmd_args));
in_cmd = cactus_get_cmd(*cmd_args);
@@ -45,10 +67,33 @@ bool cactus_handle_cmd(smc_ret_values *cmd_args, smc_ret_values *ret,
it_cmd++) {
if (it_cmd->id == in_cmd) {
*ret = it_cmd->fn(cmd_args, mb);
+
+ /*
+ * Increment the number of requests handled in current
+ * core.
+ */
+ requests_counter[core_pos]++;
+
return true;
}
}
+ /* Handle special command. */
+ if (in_cmd == CACTUS_GET_REQ_COUNT_CMD) {
+ uint32_t requests_counter_resp;
+
+ /* Read value from array. */
+ requests_counter_resp = requests_counter[core_pos];
+ VERBOSE("Requests Counter %u, core: %u\n", requests_counter_resp,
+ core_pos);
+
+ *ret = cactus_success_resp(
+ ffa_dir_msg_dest(*cmd_args),
+ ffa_dir_msg_source(*cmd_args),
+ requests_counter_resp);
+ return true;
+ }
+
*ret = cactus_error_resp(ffa_dir_msg_dest(*cmd_args),
ffa_dir_msg_source(*cmd_args),
CACTUS_ERROR_UNHANDLED);
diff --git a/spm/cactus/cactus_tests/cactus_test_cpu_features.c b/spm/cactus/cactus_tests/cactus_test_cpu_features.c
deleted file mode 100644
index 7bf6e830b..000000000
--- a/spm/cactus/cactus_tests/cactus_test_cpu_features.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "cactus_message_loop.h"
-#include "cactus_test_cmds.h"
-#include "spm_common.h"
-
-/*
- * Fill SIMD vectors from secure world side with a unique value.
- * 0x22 is just a dummy value to be distinguished from the value
- * in the normal world.
- */
-CACTUS_CMD_HANDLER(req_simd_fill, CACTUS_REQ_SIMD_FILL_CMD)
-{
- simd_vector_t simd_vectors[SIMD_NUM_VECTORS];
-
- for (unsigned int num = 0U; num < SIMD_NUM_VECTORS; num++) {
- memset(simd_vectors[num], 0x22 * num, sizeof(simd_vector_t));
- }
-
- fill_simd_vector_regs(simd_vectors);
-
- return cactus_response(ffa_dir_msg_dest(*args),
- ffa_dir_msg_source(*args),
- CACTUS_SUCCESS);
-}
diff --git a/spm/cactus/cactus_tests/cactus_test_direct_messaging.c b/spm/cactus/cactus_tests/cactus_test_direct_messaging.c
index a59cfa24a..fd82f824b 100644
--- a/spm/cactus/cactus_tests/cactus_test_direct_messaging.c
+++ b/spm/cactus/cactus_tests/cactus_test_direct_messaging.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,9 +23,9 @@ CACTUS_CMD_HANDLER(echo_cmd, CACTUS_ECHO_CMD)
CACTUS_CMD_HANDLER(req_echo_cmd, CACTUS_REQ_ECHO_CMD)
{
- smc_ret_values ffa_ret;
- ffa_vm_id_t vm_id = ffa_dir_msg_dest(*args);
- ffa_vm_id_t echo_dest = cactus_req_echo_get_echo_dest(*args);
+ struct ffa_value ffa_ret;
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t echo_dest = cactus_req_echo_get_echo_dest(*args);
uint64_t echo_val = cactus_echo_get_val(*args);
VERBOSE("%x requested to send echo to %x, value %llx\n",
@@ -48,12 +48,12 @@ CACTUS_CMD_HANDLER(req_echo_cmd, CACTUS_REQ_ECHO_CMD)
return cactus_success_resp(vm_id, ffa_dir_msg_source(*args), 0);
}
-static smc_ret_values base_deadlock_handler(ffa_vm_id_t vm_id,
- ffa_vm_id_t source,
- ffa_vm_id_t deadlock_dest,
- ffa_vm_id_t deadlock_next_dest)
+static struct ffa_value base_deadlock_handler(ffa_id_t vm_id,
+ ffa_id_t source,
+ ffa_id_t deadlock_dest,
+ ffa_id_t deadlock_next_dest)
{
- smc_ret_values ffa_ret;
+ struct ffa_value ffa_ret;
ffa_ret = cactus_deadlock_send_cmd(vm_id, deadlock_dest,
deadlock_next_dest);
@@ -63,7 +63,7 @@ static smc_ret_values base_deadlock_handler(ffa_vm_id_t vm_id,
* an FF-A direct message, to the first partition.
*/
bool is_deadlock_detected = (ffa_func_id(ffa_ret) == FFA_ERROR) &&
- (ffa_error_code(ffa_ret) == FFA_ERROR_BUSY);
+ (ffa_error_code(ffa_ret) == FFA_ERROR_DENIED);
/*
* Should be true after the deadlock has been detected and after the
@@ -92,9 +92,9 @@ static smc_ret_values base_deadlock_handler(ffa_vm_id_t vm_id,
CACTUS_CMD_HANDLER(deadlock_cmd, CACTUS_DEADLOCK_CMD)
{
- ffa_vm_id_t source = ffa_dir_msg_source(*args);
- ffa_vm_id_t deadlock_dest = cactus_deadlock_get_next_dest(*args);
- ffa_vm_id_t deadlock_next_dest = source;
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t deadlock_dest = cactus_deadlock_get_next_dest(*args);
+ ffa_id_t deadlock_next_dest = source;
VERBOSE("%x is creating deadlock. next: %x\n", source, deadlock_dest);
@@ -104,13 +104,25 @@ CACTUS_CMD_HANDLER(deadlock_cmd, CACTUS_DEADLOCK_CMD)
CACTUS_CMD_HANDLER(req_deadlock_cmd, CACTUS_REQ_DEADLOCK_CMD)
{
- ffa_vm_id_t vm_id = ffa_dir_msg_dest(*args);
- ffa_vm_id_t source = ffa_dir_msg_source(*args);
- ffa_vm_id_t deadlock_dest = cactus_deadlock_get_next_dest(*args);
- ffa_vm_id_t deadlock_next_dest = cactus_deadlock_get_next_dest2(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t deadlock_dest = cactus_deadlock_get_next_dest(*args);
+ ffa_id_t deadlock_next_dest = cactus_deadlock_get_next_dest2(*args);
VERBOSE("%x requested deadlock with %x and %x\n",
ffa_dir_msg_source(*args), deadlock_dest, deadlock_next_dest);
return base_deadlock_handler(vm_id, source, deadlock_dest, deadlock_next_dest);
}
+
+CACTUS_CMD_HANDLER(ras_delegate_cmd, CACTUS_RAS_DELEGATE_CMD)
+{
+ uint64_t event_id = cactus_ras_get_event_id(*args);
+
+ INFO("Received RAS cmd at %x, value %llu.\n", ffa_dir_msg_dest(*args),
+ event_id);
+
+ return cactus_success_resp(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ event_id);
+}
diff --git a/spm/cactus/cactus_tests/cactus_test_ffa.c b/spm/cactus/cactus_tests/cactus_test_ffa.c
deleted file mode 100644
index 2ade7bd98..000000000
--- a/spm/cactus/cactus_tests/cactus_test_ffa.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-#include <assert.h>
-#include <debug.h>
-#include <errno.h>
-
-#include <cactus_def.h>
-#include <cactus_platform_def.h>
-#include <ffa_endpoints.h>
-#include <sp_helpers.h>
-#include <spm_helpers.h>
-#include <spm_common.h>
-
-#include <lib/libc/string.h>
-
-/* FFA version test helpers */
-#define FFA_MAJOR 1U
-#define FFA_MINOR 0U
-
-static const uint32_t primary_uuid[4] = PRIMARY_UUID;
-static const uint32_t secondary_uuid[4] = SECONDARY_UUID;
-static const uint32_t tertiary_uuid[4] = TERTIARY_UUID;
-static const uint32_t null_uuid[4] = {0};
-
-/*
- * Test FFA_FEATURES interface.
- */
-static void ffa_features_test(void)
-{
- const char *test_features = "FFA Features interface";
- smc_ret_values ffa_ret;
- const struct ffa_features_test *ffa_feature_test_target;
- unsigned int i, test_target_size =
- get_ffa_feature_test_target(&ffa_feature_test_target);
-
-
- announce_test_section_start(test_features);
-
- for (i = 0U; i < test_target_size; i++) {
- announce_test_start(ffa_feature_test_target[i].test_name);
-
- ffa_ret = ffa_features(ffa_feature_test_target[i].feature);
- expect(ffa_func_id(ffa_ret), ffa_feature_test_target[i].expected_ret);
- if (ffa_feature_test_target[i].expected_ret == FFA_ERROR) {
- expect(ffa_error_code(ffa_ret), FFA_ERROR_NOT_SUPPORTED);
- }
-
- announce_test_end(ffa_feature_test_target[i].test_name);
- }
-
- announce_test_section_end(test_features);
-}
-
-static void ffa_partition_info_helper(struct mailbox_buffers *mb, const uint32_t uuid[4],
- const struct ffa_partition_info *expected,
- const uint16_t expected_size)
-{
- smc_ret_values ret = ffa_partition_info_get(uuid);
- unsigned int i;
- expect(ffa_func_id(ret), FFA_SUCCESS_SMC32);
-
- struct ffa_partition_info *info = (struct ffa_partition_info *)(mb->recv);
- for (i = 0U; i < expected_size; i++) {
- expect(info[i].id, expected[i].id);
- expect(info[i].exec_context, expected[i].exec_context);
- expect(info[i].properties, expected[i].properties);
- }
-
- ret = ffa_rx_release();
- expect(ffa_func_id(ret), FFA_SUCCESS_SMC32);
-}
-
-static void ffa_partition_info_wrong_test(void)
-{
- const char *test_wrong_uuid = "Request wrong UUID";
- uint32_t uuid[4] = {1};
-
- announce_test_start(test_wrong_uuid);
-
- smc_ret_values ret = ffa_partition_info_get(uuid);
- expect(ffa_func_id(ret), FFA_ERROR);
- expect(ffa_error_code(ret), FFA_ERROR_INVALID_PARAMETER);
-
- announce_test_end(test_wrong_uuid);
-}
-
-static void ffa_partition_info_get_test(struct mailbox_buffers *mb)
-{
- const char *test_partition_info = "FFA Partition info interface";
- const char *test_primary = "Get primary partition info";
- const char *test_secondary = "Get secondary partition info";
- const char *test_tertiary = "Get tertiary partition info";
- const char *test_all = "Get all partitions info";
-
- const struct ffa_partition_info expected_info[] = {
- /* Primary partition info */
- {
- .id = SPM_VM_ID_FIRST,
- .exec_context = CACTUS_PRIMARY_EC_COUNT,
- /* Supports receipt of direct message requests. */
- .properties = 1U
- },
- /* Secondary partition info */
- {
- .id = SPM_VM_ID_FIRST + 1U,
- .exec_context = CACTUS_SECONDARY_EC_COUNT,
- .properties = 1U
- },
- /* Tertiary partition info */
- {
- .id = SPM_VM_ID_FIRST + 2U,
- .exec_context = CACTUS_TERTIARY_EC_COUNT,
- .properties = 1U
- }
- };
-
- announce_test_section_start(test_partition_info);
-
- announce_test_start(test_tertiary);
- ffa_partition_info_helper(mb, tertiary_uuid, &expected_info[2], 1);
- announce_test_end(test_tertiary);
-
- announce_test_start(test_secondary);
- ffa_partition_info_helper(mb, secondary_uuid, &expected_info[1], 1);
- announce_test_end(test_secondary);
-
- announce_test_start(test_primary);
- ffa_partition_info_helper(mb, primary_uuid, &expected_info[0], 1);
- announce_test_end(test_primary);
-
- announce_test_start(test_all);
- ffa_partition_info_helper(mb, null_uuid, expected_info, 3);
- announce_test_end(test_all);
-
- ffa_partition_info_wrong_test();
-
- announce_test_section_end(test_partition_info);
-}
-
-void ffa_version_test(void)
-{
- const char *test_ffa_version = "FFA Version interface";
-
- announce_test_start(test_ffa_version);
-
- smc_ret_values ret = ffa_version(MAKE_FFA_VERSION(FFA_MAJOR, FFA_MINOR));
- uint32_t spm_version = (uint32_t)ret.ret0;
-
- bool ffa_version_compatible =
- ((spm_version >> FFA_VERSION_MAJOR_SHIFT) == FFA_MAJOR &&
- (spm_version & FFA_VERSION_MINOR_MASK) >= FFA_MINOR);
-
- NOTICE("FFA_VERSION returned %u.%u; Compatible: %i\n",
- spm_version >> FFA_VERSION_MAJOR_SHIFT,
- spm_version & FFA_VERSION_MINOR_MASK,
- (int)ffa_version_compatible);
-
- expect((int)ffa_version_compatible, (int)true);
-
- announce_test_end(test_ffa_version);
-}
-
-void ffa_tests(struct mailbox_buffers *mb)
-{
- const char *test_ffa = "FFA Interfaces";
-
- announce_test_section_start(test_ffa);
-
- ffa_features_test();
- ffa_version_test();
- ffa_partition_info_get_test(mb);
-
- announce_test_section_end(test_ffa);
-}
diff --git a/spm/cactus/cactus_tests/cactus_test_interrupts.c b/spm/cactus/cactus_tests/cactus_test_interrupts.c
index b675dfc1e..2e0249c45 100644
--- a/spm/cactus/cactus_tests/cactus_test_interrupts.c
+++ b/spm/cactus/cactus_tests/cactus_test_interrupts.c
@@ -1,36 +1,133 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common/debug.h>
+#include <drivers/arm/sp805.h>
#include <sp_helpers.h>
#include <spm_helpers.h>
#include "cactus_message_loop.h"
#include "cactus_test_cmds.h"
+#include <mmio.h>
+#include <platform.h>
+
+/* Secure virtual interrupt that was last handled by Cactus SP. */
+extern uint32_t last_serviced_interrupt[PLATFORM_CORE_COUNT];
+static int flag_set;
+static volatile bool test_espi_handled;
+
+static void handle_sec_wdog_interrupt(void)
+{
+ /*
+ * Interrupt triggered due to Trusted watchdog timer expiry.
+ * Clear the interrupt and stop the timer.
+ */
+ VERBOSE("Trusted WatchDog timer stopped\n");
+ sp805_twdog_stop();
+
+ /* Perform secure interrupt de-activation. */
+ spm_interrupt_deactivate(IRQ_TWDOG_INTID);
+}
+
+static void check_sec_wdog_interrupt_triggered(void)
+{
+ handle_sec_wdog_interrupt();
+ expect(flag_set, 0);
+ flag_set = 1;
+}
+
CACTUS_CMD_HANDLER(sleep_cmd, CACTUS_SLEEP_CMD)
{
- uint64_t timer_freq = read_cntfrq_el0();
- uint64_t time1, time2, time_lapsed;
+ uint64_t time_lapsed;
uint32_t sleep_time = cactus_get_sleep_time(*args);
- VERBOSE("Request to sleep %x for %ums.\n", ffa_dir_msg_dest(*args), sleep_time);
+ VERBOSE("Request to sleep %x for %ums.\n", ffa_dir_msg_dest(*args),
+ sleep_time);
- time1 = read_cntvct_el0();
- sp_sleep(sleep_time);
- time2 = read_cntvct_el0();
+ time_lapsed = sp_sleep_elapsed_time(sleep_time);
- /* Lapsed time should be at least equal to sleep time */
- time_lapsed = ((time2 - time1) * 1000) / timer_freq;
+ /* Lapsed time should be at least equal to sleep time. */
+ VERBOSE("Sleep complete: %llu\n", time_lapsed);
return cactus_response(ffa_dir_msg_dest(*args),
ffa_dir_msg_source(*args),
time_lapsed);
}
+CACTUS_CMD_HANDLER(sleep_fwd_cmd, CACTUS_FWD_SLEEP_CMD)
+{
+ struct ffa_value ffa_ret;
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t fwd_dest = cactus_get_fwd_sleep_dest(*args);
+ uint32_t sleep_ms = cactus_get_sleep_time(*args);
+ bool hint_interrupted = cactus_get_fwd_sleep_interrupted_hint(*args);
+	bool fwd_dest_interrupted = false;
+
+ VERBOSE("VM%x requested %x to sleep for value %u\n",
+ ffa_dir_msg_source(*args), fwd_dest, sleep_ms);
+
+ ffa_ret = cactus_sleep_cmd(vm_id, fwd_dest, sleep_ms);
+
+ /*
+ * The target of the direct request could be pre-empted any number of
+ * times. Moreover, the target SP may or may not support managed exit.
+ * Hence, the target is allocated cpu cycles in this while loop.
+ */
+ while ((ffa_func_id(ffa_ret) == FFA_INTERRUPT) ||
+ is_expected_cactus_response(ffa_ret, MANAGED_EXIT_INTERRUPT_ID,
+ 0)) {
+ fwd_dest_interrupted = true;
+
+ if (ffa_func_id(ffa_ret) == FFA_INTERRUPT) {
+ /* Received FFA_INTERRUPT in blocked state. */
+ VERBOSE("Processing FFA_INTERRUPT while"
+ " blocked on direct response\n");
+
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int my_core_pos = spm_get_my_core_pos();
+
+ ffa_ret = ffa_run(fwd_dest, my_core_pos);
+ } else {
+ /*
+ * Destination sent managed exit response. Allocate
+ * dummy cycles through direct request message to
+ * destination SP.
+ */
+ VERBOSE("SP%x: received Managed Exit as response\n",
+ vm_id);
+ ffa_ret = cactus_resume_after_managed_exit(vm_id,
+ fwd_dest);
+ }
+ }
+
+ if (hint_interrupted && !fwd_dest_interrupted) {
+		ERROR("Forwarded destination of the sleep command was not"
+ " interrupted as anticipated\n");
+ return cactus_error_resp(vm_id, ffa_dir_msg_source(*args),
+ CACTUS_ERROR_TEST);
+ }
+
+ if (!is_ffa_direct_response(ffa_ret)) {
+ ERROR("Encountered error in CACTUS_FWD_SLEEP_CMD response\n");
+ return cactus_error_resp(vm_id, ffa_dir_msg_source(*args),
+ CACTUS_ERROR_FFA_CALL);
+ }
+
+ if (cactus_get_response(ffa_ret) < sleep_ms) {
+ ERROR("Request returned: %u ms!\n",
+ cactus_get_response(ffa_ret));
+ return cactus_error_resp(vm_id, ffa_dir_msg_source(*args),
+ CACTUS_ERROR_TEST);
+
+ }
+
+ return cactus_success_resp(vm_id, ffa_dir_msg_source(*args), 0);
+}
+
CACTUS_CMD_HANDLER(interrupt_cmd, CACTUS_INTERRUPT_CMD)
{
uint32_t int_id = cactus_get_interrupt_id(*args);
@@ -49,3 +146,137 @@ CACTUS_CMD_HANDLER(interrupt_cmd, CACTUS_INTERRUPT_CMD)
ffa_dir_msg_source(*args),
CACTUS_SUCCESS);
}
+
+CACTUS_CMD_HANDLER(twdog_cmd, CACTUS_TWDOG_START_CMD)
+{
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t source = ffa_dir_msg_source(*args);
+
+ uint64_t time_ms = cactus_get_wdog_duration(*args);
+
+ sp_register_interrupt_handler(handle_sec_wdog_interrupt,
+ IRQ_TWDOG_INTID);
+
+ VERBOSE("Starting TWDOG: %llums\n", time_ms);
+ sp805_twdog_refresh();
+ sp805_twdog_start((time_ms * ARM_SP805_TWDG_CLK_HZ) / 1000);
+
+ return cactus_success_resp(vm_id, source, time_ms);
+}
+
+bool handle_twdog_interrupt_sp_sleep(uint32_t sleep_time, uint64_t *time_lapsed)
+{
+ sp_register_interrupt_handler(check_sec_wdog_interrupt_triggered,
+ IRQ_TWDOG_INTID);
+ *time_lapsed += sp_sleep_elapsed_time(sleep_time);
+
+ if (flag_set == 0) {
+ return false;
+ }
+
+ /* Reset the flag and unregister the handler. */
+ flag_set = 0;
+ sp_unregister_interrupt_handler(IRQ_TWDOG_INTID);
+
+ return true;
+}
+
+CACTUS_CMD_HANDLER(sleep_twdog_cmd, CACTUS_SLEEP_TRIGGER_TWDOG_CMD)
+{
+ uint64_t time_lapsed = 0;
+ uint32_t sleep_time = cactus_get_sleep_time(*args) / 2;
+ uint64_t time_ms = cactus_get_wdog_trigger_duration(*args);
+
+ VERBOSE("Request to sleep %x for %ums.\n", ffa_dir_msg_dest(*args),
+ sleep_time);
+
+ if (!handle_twdog_interrupt_sp_sleep(sleep_time, &time_lapsed)) {
+ goto fail;
+ }
+
+ /* Lapsed time should be at least equal to sleep time. */
+ VERBOSE("Sleep complete: %llu\n", time_lapsed);
+
+ VERBOSE("Starting TWDOG: %llums\n", time_ms);
+ sp805_twdog_refresh();
+ sp805_twdog_start((time_ms * ARM_SP805_TWDG_CLK_HZ) / 1000);
+
+ VERBOSE("2nd Request to sleep %x for %ums.\n", ffa_dir_msg_dest(*args),
+ sleep_time);
+
+ if (!handle_twdog_interrupt_sp_sleep(sleep_time, &time_lapsed)) {
+ goto fail;
+ }
+
+ /* Lapsed time should be at least equal to sleep time. */
+ VERBOSE("2nd Sleep complete: %llu\n", time_lapsed);
+
+ return cactus_response(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ time_lapsed);
+fail:
+ /* Test failed. */
+ ERROR("Watchdog interrupt not handled\n");
+ return cactus_error_resp(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ CACTUS_ERROR_TEST);
+}
+
+CACTUS_CMD_HANDLER(interrupt_serviced_cmd, CACTUS_LAST_INTERRUPT_SERVICED_CMD)
+{
+ unsigned int core_pos = get_current_core_id();
+
+ return cactus_response(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ last_serviced_interrupt[core_pos]);
+}
+
+static void sec_interrupt_test_espi_handled(void)
+{
+ expect(test_espi_handled, false);
+ test_espi_handled = true;
+ NOTICE("Interrupt handler for test espi interrupt called\n");
+
+ /* Perform secure interrupt de-activation. */
+ spm_interrupt_deactivate(IRQ_ESPI_TEST_INTID);
+}
+
+CACTUS_CMD_HANDLER(trigger_espi_cmd, CACTUS_TRIGGER_ESPI_CMD)
+{
+ uint32_t espi_id = cactus_get_espi_id(*args);
+
+ /*
+ * The SiP function ID 0x82000100 must have been added to the SMC
+ * whitelist of the Cactus SP that invokes it.
+ */
+ smc_args plat_sip_call = {
+ .fid = 0x82000100,
+ .arg1 = espi_id,
+ };
+ smc_ret_values ret;
+
+ sp_register_interrupt_handler(sec_interrupt_test_espi_handled,
+ espi_id);
+
+ /*
+ * Call the low level assembler routine to make the SMC call bypassing
+	 * tftf_smc, as tftf_smc will set the SVE hint bit in the SMC FID when the
+	 * CPU supports SVE and SVE traps are enabled.
+	 *
+	 * This can be changed to a tftf_smc call once the SPMC disregards the SVE
+	 * hint bit during function identification.
+ */
+ ret = asm_tftf_smc64(plat_sip_call.fid, plat_sip_call.arg1, 0, 0, 0,
+ 0, 0, 0);
+
+ if (ret.ret0 == SMC_UNKNOWN) {
+ ERROR("SiP SMC call not supported\n");
+ return cactus_error_resp(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ CACTUS_ERROR_TEST);
+ }
+
+ return cactus_response(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ test_espi_handled ? 1 : 0);
+}
diff --git a/spm/cactus/cactus_tests/cactus_test_memory_sharing.c b/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
index e7bce50f4..4d738b198 100644
--- a/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
+++ b/spm/cactus/cactus_tests/cactus_test_memory_sharing.c
@@ -1,96 +1,255 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <cactus_def.h>
+#include <sp_def.h>
#include "cactus_message_loop.h"
#include "cactus_test_cmds.h"
-#include "cactus_tests.h"
#include <debug.h>
#include <ffa_helpers.h>
#include <sp_helpers.h>
+#include "sp_tests.h"
+#include "spm_common.h"
+#include "stdint.h"
#include <xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <sync.h>
-/* Memory section to be used for memory share operations */
-static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
+static volatile uint32_t data_abort_gpf_triggered;
+
+static bool data_abort_gpf_handler(void)
+{
+ uint64_t esr_el1 = read_esr_el1();
+
+ VERBOSE("%s count %u esr_el1 %llx elr_el1 %lx\n",
+ __func__, data_abort_gpf_triggered, esr_el1,
+ read_elr_el1());
+
+ /* Expect a data abort because of a GPF. */
+ if ((EC_BITS(esr_el1) == EC_DABORT_CUR_EL) &&
+ ((ISS_BITS(esr_el1) & ISS_DFSC_MASK) == DFSC_GPF_DABORT)) {
+ data_abort_gpf_triggered++;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Each Cactus SP has a memory region dedicated to memory sharing tests,
+ * described in its partition manifest.
+ * This function returns the expected base address depending on the
+ * SP ID (should be the same as the manifest).
+ */
+static void *share_page(ffa_id_t cactus_sp_id)
+{
+ switch (cactus_sp_id) {
+ case SP_ID(1):
+ return (void *)CACTUS_SP1_MEM_SHARE_BASE;
+ case SP_ID(2):
+ return (void *)CACTUS_SP2_MEM_SHARE_BASE;
+ case SP_ID(3):
+ return (void *)CACTUS_SP3_MEM_SHARE_BASE;
+ default:
+ ERROR("Helper function expecting a valid Cactus SP ID!\n");
+ panic();
+ }
+}
+
+static void *share_page_non_secure(ffa_id_t cactus_sp_id)
+{
+ if (cactus_sp_id != SP_ID(3)) {
+ ERROR("Helper function expecting a valid Cactus SP ID!\n");
+ panic();
+ }
+
+ return (void *)CACTUS_SP3_NS_MEM_SHARE_BASE;
+}
+
+static bool cactus_mem_unmap_and_relinquish(
+ struct ffa_composite_memory_region *composite,
+ void *send, ffa_memory_handle_t handle, ffa_id_t vm_id)
+{
+ int ret;
+
+ for (uint32_t i = 0; i < composite->constituent_count; i++) {
+ uint64_t base_address = (uint64_t)composite->constituents[i]
+ .address;
+ size_t size = composite->constituents[i].page_count * PAGE_SIZE;
+
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)composite->constituents[i].address,
+ composite->constituents[i].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed to unmap received memory region %llx "
+ "size: %lu (error:%d)\n",
+ base_address, size, ret);
+ return false;
+ }
+ }
+
+ if (!memory_relinquish((struct ffa_mem_relinquish *)send,
+ handle, vm_id)) {
+ return false;
+ }
+
+ return true;
+}
CACTUS_CMD_HANDLER(mem_send_cmd, CACTUS_MEM_SEND_CMD)
{
struct ffa_memory_region *m;
struct ffa_composite_memory_region *composite;
- int ret;
+ int ret = -1;
unsigned int mem_attrs;
uint32_t *ptr;
- ffa_vm_id_t source = ffa_dir_msg_source(*args);
- ffa_vm_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
uint32_t mem_func = cactus_req_mem_send_get_mem_func(*args);
uint64_t handle = cactus_mem_send_get_handle(*args);
-
- expect(memory_retrieve(mb, &m, handle, source, vm_id, mem_func), true);
+ ffa_memory_region_flags_t retrv_flags =
+ cactus_mem_send_get_retrv_flags(*args);
+ uint32_t words_to_write = cactus_mem_send_words_to_write(*args);
+ bool expect_exception = cactus_mem_send_expect_exception(*args);
+
+ struct ffa_memory_access receiver = ffa_memory_access_init(
+ vm_id, FFA_DATA_ACCESS_RW,
+ (mem_func == FFA_MEM_SHARE_SMC32)
+ ? FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED
+ : FFA_INSTRUCTION_ACCESS_NX,
+ 0, NULL);
+
+ expect(memory_retrieve(mb, &m, handle, source, &receiver, 1,
+ retrv_flags),
+ true);
composite = ffa_memory_region_get_composite(m, 0);
- VERBOSE("Address: %p; page_count: %x %x\n",
- composite->constituents[0].address,
- composite->constituents[0].page_count, PAGE_SIZE);
-
/* This test is only concerned with RW permissions. */
- if (ffa_get_data_access_attr(
- m->receivers[0].receiver_permissions.permissions) !=
- FFA_DATA_ACCESS_RW) {
+ if (m->receivers[0].receiver_permissions.permissions.data_access !=
+ FFA_DATA_ACCESS_RW) {
ERROR("Permissions not expected!\n");
return cactus_error_resp(vm_id, source, CACTUS_ERROR_TEST);
}
mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;
- if (!IS_SP_ID(source)) {
+ if (m->attributes.security == FFA_MEMORY_SECURITY_NON_SECURE) {
mem_attrs |= MT_NS;
}
- ret = mmap_add_dynamic_region(
- (uint64_t)composite->constituents[0].address,
- (uint64_t)composite->constituents[0].address,
- composite->constituents[0].page_count * PAGE_SIZE,
- mem_attrs);
+ for (uint32_t i = 0; i < composite->constituent_count; i++) {
+ uint64_t base_address = (uint64_t)composite->constituents[i]
+ .address;
+ size_t size = composite->constituents[i].page_count * PAGE_SIZE;
- if (ret != 0) {
- ERROR("Failed first mmap_add_dynamic_region!\n");
- return cactus_error_resp(vm_id, source, CACTUS_ERROR_TEST);
+ ret = mmap_add_dynamic_region(
+ base_address, base_address, size, mem_attrs);
+
+ if (ret != 0) {
+ ERROR("Failed to map received memory region %llx "
+ "size: %lu (error:%d)\n",
+ base_address, size, ret);
+ return cactus_error_resp(vm_id,
+ source,
+ CACTUS_ERROR_TEST);
+ }
}
VERBOSE("Memory has been mapped\n");
- ptr = (uint32_t *) composite->constituents[0].address;
+ for (uint32_t i = 0; i < composite->constituent_count; i++) {
+ ptr = (uint32_t *) composite->constituents[i].address;
+
+ for (uint32_t j = 0; j < words_to_write; j++) {
+
+ /**
+ * Check that memory has been cleared by the SPMC
+ * before using it.
+ */
+ if ((retrv_flags & FFA_MEMORY_REGION_FLAG_CLEAR) != 0U) {
+ VERBOSE("Check if memory has been cleared.\n");
+ if (ptr[j] != 0) {
+ /*
+ * If it hasn't been cleared, shouldn't
+ * be used.
+ */
+ ERROR("Memory NOT cleared!\n");
+ cactus_mem_unmap_and_relinquish(composite,
+ mb->send,
+ handle, vm_id);
+ ffa_rx_release();
+ return cactus_error_resp(
+ vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+ } else {
+ /*
+ * In RME enabled systems, the memory is expected
+ * to be scrubbed on PAS updates from S to NS.
+ * As well, it is likely that the memory
+ * addresses are shadowed, and the contents are
+				 * not visible across updates from the
+ * different address spaces. As such, the SP
+				 * shall not rely on the memory content being
+				 * in any particular form. FFA_MEM_LEND/FFA_MEM_DONATE are
+ * thus considered for memory allocation
+ * purposes.
+ *
+ * Expect valid data if:
+ * - Operation between SPs.
+ * - Memory sharing from NWd to SP.
+ */
+ if ((mem_func != FFA_MEM_SHARE_SMC32 &&
+ !IS_SP_ID(m->sender)) ||
+ expect_exception) {
+ continue;
+ }
+
+ VERBOSE("Check memory contents. Expect %u "
+ "words of %x\n", words_to_write,
+ mem_func + 0xFFA);
+
+ /* SPs writing `mem_func` + 0xFFA. */
+				if (ptr[j] != mem_func + 0xFFA) {
+ ERROR("Memory content NOT as expected!\n");
+ cactus_mem_unmap_and_relinquish(
+ composite, mb->send, handle,
+ vm_id);
+ ffa_rx_release();
+ return cactus_error_resp(
+ vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+ }
+ }
+ }
+
+ data_abort_gpf_triggered = 0;
+ register_custom_sync_exception_handler(data_abort_gpf_handler);
/* Write mem_func to retrieved memory region for validation purposes. */
- VERBOSE("Writing: %x\n", mem_func);
- for (unsigned int i = 0U; i < 5U; i++)
- ptr[i] = mem_func;
+ for (uint32_t i = 0; i < composite->constituent_count; i++) {
+ ptr = (uint32_t *) composite->constituents[i].address;
+ for (uint32_t j = 0; j < words_to_write; j++) {
+ ptr[j] = mem_func + 0xFFA;
+ }
+ }
+
+ unregister_custom_sync_exception_handler();
/*
* A FFA_MEM_DONATE changes the ownership of the page, as such no
* relinquish is needed.
*/
- if (mem_func != FFA_MEM_DONATE_SMC32) {
- ret = mmap_remove_dynamic_region(
- (uint64_t)composite->constituents[0].address,
- composite->constituents[0].page_count * PAGE_SIZE);
-
- if (ret != 0) {
- ERROR("Failed first mmap_add_dynamic_region!\n");
- return cactus_error_resp(vm_id, source,
- CACTUS_ERROR_TEST);
- }
-
- if (!memory_relinquish((struct ffa_mem_relinquish *)mb->send,
- m->handle, vm_id)) {
- return cactus_error_resp(vm_id, source,
- CACTUS_ERROR_TEST);
- }
+ if (mem_func != FFA_MEM_DONATE_SMC32 &&
+ !cactus_mem_unmap_and_relinquish(composite, mb->send, handle,
+ vm_id)) {
+ return cactus_error_resp(vm_id, source,
+ CACTUS_ERROR_TEST);
}
if (ffa_func_id(ffa_rx_release()) != FFA_SUCCESS_SMC32) {
@@ -100,43 +259,85 @@ CACTUS_CMD_HANDLER(mem_send_cmd, CACTUS_MEM_SEND_CMD)
}
return cactus_success_resp(vm_id,
- source, 0);
+ source, data_abort_gpf_triggered);
}
CACTUS_CMD_HANDLER(req_mem_send_cmd, CACTUS_REQ_MEM_SEND_CMD)
{
- smc_ret_values ffa_ret;
+ struct ffa_value ffa_ret;
uint32_t mem_func = cactus_req_mem_send_get_mem_func(*args);
- ffa_vm_id_t receiver = cactus_req_mem_send_get_receiver(*args);
+ ffa_id_t receiver_id = cactus_req_mem_send_get_receiver(*args);
ffa_memory_handle_t handle;
- ffa_vm_id_t vm_id = ffa_dir_msg_dest(*args);
- ffa_vm_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ uint32_t *ptr;
+ bool non_secure = cactus_req_mem_send_get_non_secure(*args);
+ void *share_page_addr =
+ non_secure ? share_page_non_secure(vm_id) : share_page(vm_id);
+ unsigned int mem_attrs;
+ int ret;
+ const uint32_t words_to_write = 10;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(receiver_id,
+ mem_func);
- VERBOSE("%x requested to send memory to %x (func: %x)\n",
- source, receiver, mem_func);
+ VERBOSE("%x requested to send memory to %x (func: %x), page: %llx\n",
+ source, receiver_id, mem_func, (uint64_t)share_page_addr);
const struct ffa_memory_region_constituent constituents[] = {
- {(void *)share_page, 1, 0}
+ {share_page_addr, 1, 0}
};
const uint32_t constituents_count = (sizeof(constituents) /
sizeof(constituents[0]));
- handle = memory_init_and_send(
- (struct ffa_memory_region *)mb->send, PAGE_SIZE,
- vm_id, receiver, constituents,
- constituents_count, mem_func);
+ VERBOSE("Sharing at 0x%llx\n", (uint64_t)constituents[0].address);
+ mem_attrs = MT_RW_DATA;
+
+ if (non_secure) {
+ mem_attrs |= MT_NS;
+ }
+
+ ret = mmap_add_dynamic_region(
+ (uint64_t)constituents[0].address,
+ (uint64_t)constituents[0].address,
+ constituents[0].page_count * PAGE_SIZE,
+ mem_attrs);
+
+ if (ret != 0) {
+		ERROR("Failed to map shared memory before sending (%d)!\n",
+ ret);
+ return cactus_error_resp(vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+
+ /* Write to memory before sharing to SP. */
+ if (IS_SP_ID(receiver_id)) {
+ for (size_t i = 0; i < constituents_count; i++) {
+ VERBOSE("Sharing Address: %p\n",
+ constituents[i].address);
+ ptr = (uint32_t *)constituents[i].address;
+ for (size_t j = 0; j < words_to_write; j++) {
+ ptr[j] = mem_func + 0xFFA;
+ }
+ }
+ }
+
+ handle = memory_init_and_send(mb->send, PAGE_SIZE, vm_id, &receiver, 1,
+ constituents, constituents_count,
+ mem_func, &ffa_ret);
/*
* If returned an invalid handle, we should break the test.
*/
if (handle == FFA_MEMORY_HANDLE_INVALID) {
- ERROR("Received an invalid FF-A memory Handle!\n");
+ VERBOSE("Received an invalid FF-A memory Handle!\n");
return cactus_error_resp(vm_id, source,
- CACTUS_ERROR_TEST);
+ ffa_error_code(ffa_ret));
}
- ffa_ret = cactus_mem_send_cmd(vm_id, receiver, mem_func, handle);
+ ffa_ret = cactus_mem_send_cmd(vm_id, receiver_id, mem_func, handle,
+ 0, 10, false);
if (!is_ffa_direct_response(ffa_ret)) {
return cactus_error_resp(vm_id, source, CACTUS_ERROR_FFA_CALL);
@@ -175,5 +376,17 @@ CACTUS_CMD_HANDLER(req_mem_send_cmd, CACTUS_REQ_MEM_SEND_CMD)
#endif
}
+	/*
+	 * Always unmap the sent memory region; it will be remapped by another
+	 * test if needed.
+	 */
+ ret = mmap_remove_dynamic_region(
+ (uint64_t)constituents[0].address,
+ constituents[0].page_count * PAGE_SIZE);
+
+ if (ret != 0) {
+ ERROR("Failed to unmap share memory region (%d)!\n", ret);
+ return cactus_error_resp(vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+
return cactus_success_resp(vm_id, source, 0);
}
diff --git a/spm/cactus/cactus_tests/cactus_test_notifications.c b/spm/cactus/cactus_tests/cactus_test_notifications.c
new file mode 100644
index 000000000..6d7b41bcd
--- /dev/null
+++ b/spm/cactus/cactus_tests/cactus_test_notifications.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "cactus_message_loop.h"
+#include "cactus_test_cmds.h"
+#include "sp_tests.h"
+
+#include <ffa_helpers.h>
+#include <spm_helpers.h>
+#include <debug.h>
+
+/* Booleans to keep track of which CPUs handled NPI. */
+static bool npi_handled[PLATFORM_CORE_COUNT];
+
+/**
+ * Helper to access the above array and set the boolean for the specific CPU.
+ */
+void set_npi_handled(uint32_t vcpu_id, bool val)
+{
+ npi_handled[vcpu_id] = val;
+}
+
+/**
+ * Helper to get state of the boolean from `npi_handled` from the respective
+ * CPU.
+ */
+bool get_npi_handled(uint32_t vcpu_id)
+{
+ return npi_handled[vcpu_id];
+}
+
+void notification_pending_interrupt_handler(void)
+{
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos = spm_get_my_core_pos();
+
+ VERBOSE("NPI handled in core %u\n", core_pos);
+
+ set_npi_handled(core_pos, true);
+}
+
+
+CACTUS_CMD_HANDLER(notifications_bind, CACTUS_NOTIFICATION_BIND_CMD)
+{
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t receiver = cactus_notification_get_receiver(*args);
+ ffa_id_t sender = cactus_notification_get_sender(*args);
+ ffa_notification_bitmap_t notifications =
+ cactus_notification_get_notifications(*args);
+ uint32_t flags = cactus_notification_get_flags(*args);
+ struct ffa_value ret;
+
+ VERBOSE("Partition %x requested to bind notifications '%llx' to %x\n",
+ source, notifications, receiver);
+
+ ret = ffa_notification_bind(sender, receiver, flags, notifications);
+
+ if (is_ffa_call_error(ret)) {
+ return cactus_error_resp(vm_id, source, ffa_error_code(ret));
+ }
+
+ return cactus_response(vm_id, source, CACTUS_SUCCESS);
+}
+
+CACTUS_CMD_HANDLER(notifications_unbind, CACTUS_NOTIFICATION_UNBIND_CMD)
+{
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t receiver = cactus_notification_get_receiver(*args);
+ ffa_id_t sender = cactus_notification_get_sender(*args);
+ ffa_notification_bitmap_t notifications =
+ cactus_notification_get_notifications(*args);
+ struct ffa_value ret;
+
+ VERBOSE("Partition %x requested to unbind notifications '%llx' to %x\n",
+ source, notifications, receiver);
+
+ ret = ffa_notification_unbind(sender, receiver, notifications);
+
+ if (is_ffa_call_error(ret)) {
+ return cactus_error_resp(vm_id, source, ffa_error_code(ret));
+ }
+
+ return cactus_response(vm_id, source, CACTUS_SUCCESS);
+}
+
+CACTUS_CMD_HANDLER(notifications_get, CACTUS_NOTIFICATION_GET_CMD)
+{
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t notification_receiver =
+ cactus_notification_get_receiver(*args);
+ uint32_t flags = cactus_notification_get_flags(*args);
+ uint32_t vcpu_id = cactus_notification_get_vcpu(*args);
+ struct ffa_value ret;
+
+ VERBOSE("Partition %x requested to get notifications.\n", source);
+
+ ret = ffa_notification_get(notification_receiver, vcpu_id, flags);
+
+ if (is_ffa_call_error(ret)) {
+ return cactus_error_resp(vm_id, source, ffa_error_code(ret));
+ }
+
+ VERBOSE("Notifications returned:\n"
+ " from sp: %llx\n"
+ " from vm: %llx\n",
+ ffa_notifications_get_from_sp(ret),
+ ffa_notifications_get_from_vm(ret));
+
+ /* If requested to check the status of NPI, for the respective CPU. */
+ if (cactus_notifications_check_npi_handled(*args)) {
+
+ /* If NPI hasn't been handled return error for this test. */
+ if (!get_npi_handled(vcpu_id)) {
+ return cactus_error_resp(vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+
+ /* Reset NPI flag for the respective core. */
+ set_npi_handled(vcpu_id, false);
+ }
+
+ return cactus_notifications_get_success_resp(
+ vm_id, source, ffa_notifications_get_from_sp(ret),
+ ffa_notifications_get_from_vm(ret));
+}
+
+CACTUS_CMD_HANDLER(notifications_set, CACTUS_NOTIFICATIONS_SET_CMD)
+{
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_notification_bitmap_t notifications =
+ cactus_notification_get_notifications(*args);
+ ffa_id_t receiver = cactus_notifications_set_get_receiver(*args);
+ ffa_id_t sender = cactus_notifications_set_get_sender(*args);
+ ffa_id_t echo_dest = cactus_req_echo_get_echo_dest(*args);
+ uint32_t flags = cactus_notification_get_flags(*args);
+ struct ffa_value ret;
+
+ VERBOSE("Partition %x requested to set notifications.\n", source);
+
+ ret = ffa_notification_set(sender, receiver, flags, notifications);
+
+ if (is_ffa_call_error(ret)) {
+ return cactus_error_resp(vm_id, source, ffa_error_code(ret));
+ }
+
+ /*
+ * If flag to delay notification pending interrupt, an echo test command
+ * should be sent to another SP, to validate SWd is not preempted.
+ */
+ if ((flags & FFA_NOTIFICATIONS_FLAG_DELAY_SRI) != 0 &&
+ IS_SP_ID(echo_dest)) {
+ VERBOSE("Delay SRI. Test Echo to %x.\n", echo_dest);
+ ret = cactus_echo_send_cmd(vm_id, echo_dest,
+ FFA_NOTIFICATION_SET);
+
+ if (!is_expected_cactus_response(ret, CACTUS_SUCCESS,
+ FFA_NOTIFICATION_SET)) {
+ ERROR("Echo Failed!\n");
+ return cactus_error_resp(vm_id, source,
+ CACTUS_ERROR_TEST);
+ }
+ }
+
+ VERBOSE("Set notifications handled (core %u)!\n", get_current_core_id());
+
+ return cactus_response(vm_id, source, CACTUS_SUCCESS);
+}
diff --git a/spm/cactus/cactus_tests/cactus_test_simd.c b/spm/cactus/cactus_tests/cactus_test_simd.c
new file mode 100644
index 000000000..bcf1c388a
--- /dev/null
+++ b/spm/cactus/cactus_tests/cactus_test_simd.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "cactus_message_loop.h"
+#include "cactus_test_cmds.h"
+#include <fpu.h>
+#include <spm_helpers.h>
+#include "spm_common.h"
+
+/*
+ * Note Test must exercise FILL and COMPARE command in
+ * sequence and on same CPU.
+ */
+static fpu_state_t sp_fpu_state_write;
+static fpu_state_t sp_fpu_state_read;
+static unsigned int core_pos;
+/*
+ * Fill the FPU state (SIMD vectors, FPCR, FPSR) on the secure world side
+ * with random values.
+ */
+CACTUS_CMD_HANDLER(req_simd_fill, CACTUS_REQ_SIMD_FILL_CMD)
+{
+ /* Get vCPU index for currently running vCPU. */
+ core_pos = spm_get_my_core_pos();
+ fpu_state_write_rand(&sp_fpu_state_write);
+ return cactus_response(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ CACTUS_SUCCESS);
+}
+
+/*
+ * Compare the FPU state (SIMD vectors, FPCR, FPSR) on the secure world side
+ * with the values previously written by the FILL command.
+ */
+CACTUS_CMD_HANDLER(req_simd_compare, CACTUS_CMP_SIMD_VALUE_CMD)
+{
+ bool test_succeed = false;
+
+ /* Get vCPU index for currently running vCPU. */
+ unsigned int core_pos1 = spm_get_my_core_pos();
+ if (core_pos1 == core_pos) {
+ fpu_state_read(&sp_fpu_state_read);
+ if (fpu_state_compare(&sp_fpu_state_write,
+ &sp_fpu_state_read) == 0) {
+ test_succeed = true;
+ }
+ }
+ return cactus_response(ffa_dir_msg_dest(*args),
+ ffa_dir_msg_source(*args),
+ test_succeed ? CACTUS_SUCCESS : CACTUS_ERROR);
+}
diff --git a/spm/cactus/cactus_tests/cactus_tests_smmuv3.c b/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
index ce53dc64a..5308d933c 100644
--- a/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
+++ b/spm/cactus/cactus_tests/cactus_tests_smmuv3.c
@@ -1,81 +1,52 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdint.h>
-#include <arch_helpers.h>
+#include <assert.h>
#include "cactus.h"
+#include <arch_helpers.h>
#include "cactus_message_loop.h"
-#include <cactus_platform_def.h>
+#include <sp_platform_def.h>
#include "cactus_test_cmds.h"
-#include "cactus_tests.h"
#include <debug.h>
#include <ffa_helpers.h>
#include <mmio.h>
#include "smmuv3_test_engine.h"
#include <sp_helpers.h>
+#include "sp_tests.h"
#include <spm_common.h>
-/* Source and target address for memcopy operation */
-#define MEMCPY_SOURCE_BASE PLAT_CACTUS_MEMCPY_BASE
-#define MEMPCY_TOTAL_SIZE (PLAT_CACTUS_MEMCPY_RANGE / 2)
-#define MEMCPY_TARGET_BASE (MEMCPY_SOURCE_BASE + MEMPCY_TOTAL_SIZE)
-
/* Miscellaneous */
#define NO_SUBSTREAMID (0xFFFFFFFFU)
-#define TRANSFER_SIZE (MEMPCY_TOTAL_SIZE / FRAME_COUNT)
#define LOOP_COUNT (5000U)
-static bool run_smmuv3_test(void)
+static bool run_testengine(uint32_t operation, uintptr_t source_addr,
+ uintptr_t target_addr, size_t transfer_size,
+ uint32_t attributes)
{
- uint64_t source_addr, cpy_range, target_addr;
- uint64_t begin_addr, end_addr, dest_addr;
+ const uint32_t streamID_list[] = { 0U, 1U };
+ uintptr_t begin_addr;
+ uintptr_t end_addr;
+ uintptr_t dest_addr;
uint32_t status;
- unsigned int i, f, attempts;
-
- /*
- * The test engine's MEMCPY command copies data from the region in
- * range [begin, end_incl] to the region with base address as udata.
- * In this test, we configure the test engine to initiate memcpy from
- * scratch page located at MEMCPY_SOURCE_BASE to the page located at
- * address MEMCPY_TARGET_BASE
- */
-
- VERBOSE("CACTUS: Running SMMUv3 test\n");
+ uint32_t f;
+ uint32_t attempts;
- source_addr = MEMCPY_SOURCE_BASE;
- cpy_range = MEMPCY_TOTAL_SIZE;
- target_addr = MEMCPY_TARGET_BASE;
- uint32_t streamID_list[] = { 0U, 1U };
-
- uint64_t data[] = {
- ULL(0xBAADFEEDCEEBDAAF),
- ULL(0x0123456776543210)
- };
-
- /* Write pre-determined content to source pages */
- for (i = 0U; i < (cpy_range / 8U); i++) {
- mmio_write64_offset(source_addr, i * 8, data[i%2]);
- }
-
- /* Clean the data caches */
- clean_dcache_range(source_addr, cpy_range);
-
- /*
- * Make sure above load, store and cache maintenance instructions
- * complete before we start writing to TestEngine frame configuration
- * fields
- */
- dsbsy();
+ assert(operation == ENGINE_MEMCPY || operation == ENGINE_RAND48);
for (f = 0U; f < FRAME_COUNT; f++) {
- attempts = 0U;
- begin_addr = source_addr + (TRANSFER_SIZE * f);
- end_addr = begin_addr + TRANSFER_SIZE - 1U;
- dest_addr = target_addr + (TRANSFER_SIZE * f);
+ begin_addr = source_addr + (transfer_size * f);
+ end_addr = begin_addr + transfer_size - 1U;
+
+ if (operation == ENGINE_MEMCPY) {
+ dest_addr = target_addr + (transfer_size * f);
+ } else {
+ dest_addr = 0;
+ }
/* Initiate DMA sequence */
mmio_write32_offset(PRIV_BASE_FRAME + F_IDX(f), PCTRL_OFF, 0);
@@ -84,7 +55,12 @@ static bool run_smmuv3_test(void)
mmio_write32_offset(PRIV_BASE_FRAME + F_IDX(f), SUBSTREAM_ID_OFF, NO_SUBSTREAMID);
mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), UCTRL_OFF, 0);
- mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), SEED_OFF, 0);
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), ATTR_OFF, attributes);
+
+ if (operation == ENGINE_RAND48) {
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), SEED_OFF, (f + 1) * 42);
+ }
+
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), BEGIN_OFF, begin_addr);
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), END_CTRL_OFF, end_addr);
@@ -92,8 +68,8 @@ static bool run_smmuv3_test(void)
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), STRIDE_OFF, 1);
mmio_write64_offset(USR_BASE_FRAME + F_IDX(f), UDATA_OFF, dest_addr);
- mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF, ENGINE_MEMCPY);
- VERBOSE("SMMUv3TestEngine: Waiting for MEMCPY completion for frame: %u\n", f);
+ mmio_write32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF, operation);
+ VERBOSE("SMMUv3TestEngine: waiting completion for frame: %u\n", f);
/*
* It is guaranteed that a read of "cmd" fields after writing to it will
@@ -101,17 +77,18 @@ static bool run_smmuv3_test(void)
* invalid.
*/
if (mmio_read32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF) == ENGINE_MIS_CFG) {
- ERROR("SMMUv3TestEngine: Misconfigured for frame: %u\n", f);
+ ERROR("SMMUv3TestEngine: misconfigured for frame: %u\n", f);
return false;
}
- /* Wait for mem copy to be complete */
+ /* Wait for operation to be complete */
+ attempts = 0U;
while (attempts++ < LOOP_COUNT) {
status = mmio_read32_offset(USR_BASE_FRAME + F_IDX(f), CMD_OFF);
if (status == ENGINE_HALTED) {
break;
} else if (status == ENGINE_ERROR) {
- ERROR("SMMUv3: Test failed\n");
+ ERROR("SMMUv3: test failed, engine error.\n");
return false;
}
@@ -123,38 +100,70 @@ static bool run_smmuv3_test(void)
}
if (attempts == LOOP_COUNT) {
- ERROR("SMMUv3: Test failed\n");
+ ERROR("SMMUv3: test failed, exceeded max. wait loop.\n");
return false;
}
dsbsy();
}
+ return true;
+}
+
+static bool run_smmuv3_memcpy(uintptr_t start_address, size_t size, uint32_t attributes)
+{
+ uintptr_t target_address;
+ size_t cpy_range = size >> 1;
+ bool ret;
+
/*
- * Invalidate cached entries to force the CPU to fetch the data from
- * Main memory
+ * The test engine's MEMCPY command copies data from the region in
+ * range [begin, end_incl] to the region with base address as udata.
+ * In this test, we configure the test engine to copy from the scratch
+ * region starting at start_address to the target region in the upper
+ * half of the provided memory range.
*/
- inv_dcache_range(source_addr, cpy_range);
- inv_dcache_range(target_addr, cpy_range);
- /* Compare source and destination memory locations for data */
- for (i = 0U; i < (cpy_range / 8U); i++) {
- if (mmio_read_64(source_addr + 8 * i) != mmio_read_64(target_addr + 8 * i)) {
- ERROR("SMMUv3: Mem copy failed: %llx\n", target_addr + 8 * i);
- return false;
+ target_address = start_address + cpy_range;
+ ret = run_testengine(ENGINE_MEMCPY, start_address, target_address,
+ cpy_range / FRAME_COUNT, attributes);
+
+ if (ret) {
+ /*
+ * Invalidate cached entries to force the CPU to fetch the data from
+ * Main memory
+ */
+ inv_dcache_range(start_address, cpy_range);
+ inv_dcache_range(target_address, cpy_range);
+
+ /* Compare source and destination memory locations for data */
+ for (size_t i = 0U; i < (cpy_range / 8U); i++) {
+ if (mmio_read_64(start_address + 8 * i) !=
+ mmio_read_64(target_address + 8 * i)) {
+ ERROR("SMMUv3: Mem copy failed: %lx\n", target_address + 8 * i);
+ return false;
+ }
}
}
- return true;
+ return ret;
+}
+
+static bool run_smmuv3_rand48(uintptr_t start_address, size_t size, uint32_t attributes)
+{
+ return run_testengine(ENGINE_RAND48, start_address, 0, size / FRAME_COUNT, attributes);
}
CACTUS_CMD_HANDLER(smmuv3_cmd, CACTUS_DMA_SMMUv3_CMD)
{
- smc_ret_values ffa_ret;
- ffa_vm_id_t vm_id = ffa_dir_msg_dest(*args);
- ffa_vm_id_t source = ffa_dir_msg_source(*args);
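+ /*
+  * Direct message layout: arg4 selects the test engine operation
+  * (MEMCPY or RAND48), arg5 carries the start address, arg6 the size
+  * and arg7 the memory attributes used for the DMA transfers.
+  */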
+ ffa_id_t vm_id = ffa_dir_msg_dest(*args);
+ ffa_id_t source = ffa_dir_msg_source(*args);
+ uint32_t operation = args->arg4;
+ uintptr_t start_address = args->arg5;
+ size_t size = args->arg6;
+ uint32_t attributes = args->arg7;
- VERBOSE("Received request through direct message for DMA service\n");
+ VERBOSE("Received request through direct message for DMA service.\n");
/*
* At present, the test cannot be run concurrently on multiple SPs as
@@ -165,11 +174,21 @@ CACTUS_CMD_HANDLER(smmuv3_cmd, CACTUS_DMA_SMMUv3_CMD)
return cactus_error_resp(vm_id, source, 0);
}
- if (run_smmuv3_test()) {
- ffa_ret = cactus_success_resp(vm_id, source, 0);
- } else {
- ffa_ret = cactus_error_resp(vm_id, source, 0);
+ switch (operation) {
+ case ENGINE_MEMCPY:
+ if (run_smmuv3_memcpy(start_address, size, attributes)) {
+ return cactus_success_resp(vm_id, source, 0);
+ }
+ break;
+ case ENGINE_RAND48:
+ if (run_smmuv3_rand48(start_address, size, attributes)) {
+ return cactus_success_resp(vm_id, source, 0);
+ }
+ break;
+ default:
+ ERROR("SMMUv3TestEngine: unsupported operation (%u).\n", operation);
+ break;
}
- return ffa_ret;
+ return cactus_error_resp(vm_id, source, 0);
}
diff --git a/spm/cactus/cactus_tests/smmuv3_test_engine.h b/spm/cactus/cactus_tests/smmuv3_test_engine.h
index 32d86ac9c..d3a3dcfcb 100644
--- a/spm/cactus/cactus_tests/smmuv3_test_engine.h
+++ b/spm/cactus/cactus_tests/smmuv3_test_engine.h
@@ -32,6 +32,7 @@
/* Offset of various control fields belonging to User Frame */
#define CMD_OFF (0x0U)
#define UCTRL_OFF (0x4U)
+#define ATTR_OFF (0x20U)
#define SEED_OFF (0x24U)
#define BEGIN_OFF (0x28U)
#define END_CTRL_OFF (0x30U)
diff --git a/spm/cactus/cactus_tests_memory_attributes.c b/spm/cactus/cactus_tests_memory_attributes.c
deleted file mode 100644
index bed6f0b55..000000000
--- a/spm/cactus/cactus_tests_memory_attributes.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <errno.h>
-#include <platform_def.h>
-#include <secure_partition.h>
-#include <sp_helpers.h>
-#include <sprt_svc.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <xlat_tables_defs.h>
-
-#include "cactus.h"
-#include "cactus_def.h"
-#include "cactus_tests.h"
-
-/* This is filled at runtime. */
-static uintptr_t cactus_tests_start;
-static uintptr_t cactus_tests_end;
-static uintptr_t cactus_tests_size;
-
-/*
- * Send an SPRT_MEMORY_PERM_ATTR_SET_AARCH64 SVC with the given arguments.
- * Return the return value of the SVC.
- */
-static int32_t request_mem_attr_changes(uintptr_t base_address,
- int pages_count,
- uint32_t memory_access_controls)
-{
- INFO("Requesting memory attributes change\n");
- INFO(" Start address : %p\n", (void *) base_address);
- INFO(" Number of pages: %i\n", pages_count);
- INFO(" Attributes : 0x%x\n", memory_access_controls);
-
- svc_args svc_values = { SPRT_MEMORY_PERM_ATTR_SET_AARCH64,
- base_address,
- pages_count,
- memory_access_controls };
- return sp_svc(&svc_values);
-}
-
-/*
- * Send an SPRT_MEMORY_PERM_ATTR_GET_AARCH64 SVC with the given arguments.
- * Return the return value of the SVC.
- */
-static int32_t request_get_mem_attr(uintptr_t base_address)
-{
- INFO("Requesting memory attributes\n");
- INFO(" Base address : %p\n", (void *) base_address);
-
- svc_args svc_values = { SPRT_MEMORY_PERM_ATTR_GET_AARCH64,
- base_address };
- return sp_svc(&svc_values);
-}
-
-/*
- * This function expects a base address and number of pages identifying the
- * extents of some memory region mapped as non-executable, read-only.
- *
- * 1) It changes its data access permissions to read-write.
- * 2) It checks this memory can now be written to.
- * 3) It restores the original data access permissions.
- *
- * If any check fails, it loops forever. It could also trigger a permission
- * fault while trying to write to the memory.
- */
-static void mem_attr_changes_unittest(uintptr_t addr, int pages_count)
-{
- int32_t ret;
- uintptr_t end_addr = addr + pages_count * PAGE_SIZE;
- uint32_t old_attr, new_attr;
-
- char test_desc[50];
-
- snprintf(test_desc, sizeof(test_desc),
- "RO -> RW (%i page(s) from address 0x%lx)", pages_count, addr);
- announce_test_start(test_desc);
-
- /*
- * Ensure we don't change the attributes of some random memory
- * location
- */
- assert(addr >= cactus_tests_start);
- assert(end_addr < (cactus_tests_start + cactus_tests_size));
-
- old_attr = SPRT_MEMORY_PERM_ATTR_RO;
- /* Memory was read-only, let's try changing that to RW */
- new_attr = SPRT_MEMORY_PERM_ATTR_RW;
-
- ret = request_mem_attr_changes(addr, pages_count, new_attr);
- expect(ret, SPRT_SUCCESS);
- printf("Successfully changed memory attributes\n");
-
- /* The attributes should be the ones we have just written. */
- ret = request_get_mem_attr(addr);
- expect(ret, SPRT_SUCCESS | (new_attr << SPRT_MEMORY_PERM_ATTR_SHIFT));
-
- /* If it worked, we should be able to write to this memory now! */
- for (unsigned char *data = (unsigned char *) addr;
- (uintptr_t) data != end_addr;
- ++data) {
- *data = 42;
- }
- printf("Successfully wrote to the memory\n");
-
- /* Let's revert back to the original attributes for the next test */
- ret = request_mem_attr_changes(addr, pages_count, old_attr);
- expect(ret, SPRT_SUCCESS);
- printf("Successfully restored the old attributes\n");
-
- /* The attributes should be the original ones again. */
- ret = request_get_mem_attr(addr);
- expect(ret, SPRT_SUCCESS | (old_attr << SPRT_MEMORY_PERM_ATTR_SHIFT));
-
- announce_test_end(test_desc);
-}
-
-/*
- * Exercise the ability of the Trusted Firmware to change the data access
- * permissions and instruction execution permissions of some memory region.
- */
-void mem_attr_changes_tests(void)
-{
- uint32_t attributes;
- int32_t ret;
- uintptr_t addr;
-
- cactus_tests_start = CACTUS_TEST_MEM_BASE;
- cactus_tests_size = CACTUS_TEST_MEM_SIZE;
- cactus_tests_end = cactus_tests_start + cactus_tests_size;
-
- const char *test_sect_desc = "memory attributes changes";
-
- announce_test_section_start(test_sect_desc);
- /*
- * Start with error cases, i.e. requests that are expected to be denied
- */
- const char *test_desc1 = "Reserved attributes value";
-
- announce_test_start(test_desc1);
- attributes = U(3);
- ret = request_mem_attr_changes(cactus_tests_start, 1, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc1);
-
- const char *test_desc2 = "Size == 0";
-
- announce_test_start(test_desc2);
- attributes = SPRT_MEMORY_PERM_ATTR_RW;
- ret = request_mem_attr_changes(cactus_tests_start, 0, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc2);
-
- const char *test_desc3 = "Unaligned address";
-
- announce_test_start(test_desc3);
- attributes = SPRT_MEMORY_PERM_ATTR_RW;
- /* Choose an address not aligned to a page boundary. */
- addr = cactus_tests_start + 5;
- ret = request_mem_attr_changes(addr, 1, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc3);
-
- const char *test_desc4 = "Unmapped memory region";
-
- announce_test_start(test_desc4);
- addr = cactus_tests_end + 2 * PAGE_SIZE;
- attributes = SPRT_MEMORY_PERM_ATTR_RW;
- ret = request_mem_attr_changes(addr, 3, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc4);
-
- const char *test_desc5 = "Partially unmapped memory region";
-
- announce_test_start(test_desc5);
- addr = cactus_tests_end - 2 * PAGE_SIZE;
- attributes = SPRT_MEMORY_PERM_ATTR_RW;
- ret = request_mem_attr_changes(addr, 6, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc5);
-
- const char *test_desc6 = "Memory region mapped with the wrong granularity";
-
- announce_test_start(test_desc6);
- /*
- * This address is usually mapped at a 2 MiB granularity. By using as
- * test address the block after the console we make sure that in case
- * the attributes of the block actually changed, the console would work
- * and we would get the error message.
- */
- addr = ((uintptr_t)PLAT_ARM_UART_BASE + 0x200000ULL) & ~(0x200000ULL - 1ULL);
- attributes = SPRT_MEMORY_PERM_ATTR_RW;
- ret = request_mem_attr_changes(addr, 1, attributes);
- expect(ret, SPRT_INVALID_PARAMETER);
- announce_test_end(test_desc6);
-
- const char *test_desc7 = "Try some valid memory change requests";
-
- announce_test_start(test_desc7);
- for (unsigned int i = 0; i < 20; ++i) {
- /*
- * Choose some random address in the pool of memory reserved
- * for these tests.
- */
- const int pages_max = cactus_tests_size / PAGE_SIZE;
- int pages_count = bound_rand(1, pages_max);
-
- addr = bound_rand(
- cactus_tests_start,
- cactus_tests_end - (pages_count * PAGE_SIZE));
- /* Align to PAGE_SIZE. */
- addr &= ~(PAGE_SIZE - 1);
-
- mem_attr_changes_unittest(addr, pages_count);
- }
- announce_test_end(test_desc7);
-
- announce_test_section_end(test_sect_desc);
-}
diff --git a/spm/cactus/cactus_tests_system_setup.c b/spm/cactus/cactus_tests_system_setup.c
deleted file mode 100644
index cead8e3be..000000000
--- a/spm/cactus/cactus_tests_system_setup.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <debug.h>
-#include <sp_helpers.h>
-#include <stdint.h>
-
-#include "cactus.h"
-
-extern uintptr_t __TEXT_START__;
-
-void system_setup_tests(void)
-{
- const char *test_sect_desc = "system setup";
-
- announce_test_section_start(test_sect_desc);
-
- /*
- * Try accessing CTR_EL0 register. This should work if SCTLR_EL1.UCT bit
- * has been correctly setup by TF.
- */
- const char *test_desc1 = "Read CTR_EL0 register";
-
- announce_test_start(test_desc1);
-
- uint32_t ctr __unused = read_ctr_el0();
-
- INFO("CTR_EL0 = 0x%x\n", ctr);
- announce_test_end(test_desc1);
-
- /*
- * Try to execute a cache maintenance instruction. This should work if
- * SCTLR_EL1.UCI bit has been correctly setup by TF.
- */
- const char *test_desc2 = "Access to cache maintenance operations";
-
- announce_test_start(test_desc2);
- flush_dcache_range((uintptr_t)&__TEXT_START__, 1);
- announce_test_end(test_desc2);
-
- /*
- * Try accessing a floating point register. This should not trap to
- * S-EL1.
- */
- const char *test_desc3 = "Access to FP regs";
-
- announce_test_start(test_desc3);
- /*
- * Can't use the 'double' type here because Cactus (like the rest of
- * the TF code) is compiled with GCC's -mgeneral-regs-only compiler flag
- * that disables floating point support in GCC.
- */
- uint64_t fp_reg;
-
- __asm__ volatile("fmov %0, d0" : "=r" (fp_reg) :: "d0");
- INFO("D0 = 0x%llx\n", fp_reg);
- __asm__ volatile(
- "fmov d0, #1.0 \n\t"
- "fmov %0, d0 \n\t"
- : "=r" (fp_reg)
- :
- : "d0");
- INFO("D0 = 0x%llx\n", fp_reg);
- announce_test_end(test_desc3);
-
- announce_test_section_end(test_sect_desc);
-}
diff --git a/spm/cactus/plat/arm/fvp/fdts/cactus-secondary.dts b/spm/cactus/plat/arm/fvp/fdts/cactus-secondary.dts
index 661684bd7..1c01a9fbe 100644
--- a/spm/cactus/plat/arm/fvp/fdts/cactus-secondary.dts
+++ b/spm/cactus/plat/arm/fvp/fdts/cactus-secondary.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,8 +15,8 @@
/* Properties */
description = "Base-1";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0xd1582309 0xf02347b9 0x827c4464 0xf5578fc8>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x092358d1 0xb94723f0 0x64447c82 0xc88f57f5>;
id = <2>;
auxiliary-id = <0xae>;
stream-endpoint-ids = <0 1 2 3>;
@@ -24,14 +24,12 @@
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0x7100000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00004000>;
xlat-granule = <0>; /* 4KiB */
- boot-order = <0>;
- messaging-method = <0>; /* Direct messaging only */
- run-time-model = <0>; /* Run to completion */
-
- /* Boot protocol */
- gp-register-num = <0x0>;
+ boot-order = <1>;
+ messaging-method = <3>; /* Direct messaging only */
+ ns-interrupts-action = <2>; /* Non-secure interrupts are signaled. */
+ notification-support; /* Support receipt of notifications. */
rx_tx-info {
compatible = "arm,ffa-manifest-rx_tx-buffer";
@@ -56,28 +54,12 @@
attributes = <0x3>; /* read-write */
};
- /* Without optional base-address */
- test-memory {
- description = "test-memory";
- pages-count = <4>;
- attributes = <0x7>; /* read-write-execute */
- };
- };
-
- device-regions {
- compatible = "arm,ffa-manifest-device-regions";
-
- test-reg {
- /* Dummy values */
- base-address = <0x00000000 0x24000000>;
- pages-count = <16>;
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0x7501000>;
attributes = <0x3>; /* read-write */
- reg = <0x10000008 0x00000001 1>;
- smmu-id = <1>;
- stream-ids = <0x0 0x1>;
- interrupts = <0x2 0x3>,
- <0x4 0x5>;
};
};
-
};
diff --git a/spm/cactus/plat/arm/fvp/fdts/cactus-tertiary.dts b/spm/cactus/plat/arm/fvp/fdts/cactus-tertiary.dts
index ea7d5d6ff..946301d8d 100644
--- a/spm/cactus/plat/arm/fvp/fdts/cactus-tertiary.dts
+++ b/spm/cactus/plat/arm/fvp/fdts/cactus-tertiary.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,49 +15,40 @@
/* Properties */
description = "Base-1";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0x79b55c73 0x1d8c44b9 0x859361e1 0x770ad8d2>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x735cb579 0xb9448c1d 0xe1619385 0xd2d80a77>;
id = <3>;
auxiliary-id = <0xae>;
stream-endpoint-ids = <0 1 2 3>;
- execution-ctx-count = <8>;
+ execution-ctx-count = <1>;
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0x7200000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00004000>;
xlat-granule = <0>; /* 4KiB */
- boot-order = <0>;
- messaging-method = <0>; /* Direct messaging only */
- run-time-model = <0>; /* Run to completion */
-
- /* Boot protocol */
- gp-register-num = <0x0>;
+ boot-order = <2>;
+ notification-support;
+ messaging-method = <3>; /* Direct messaging only */
+ managed-exit; /* Managed exit is supported */
+ managed-exit-virq;
memory-regions {
compatible = "arm,ffa-manifest-memory-regions";
- /* Without optional base-address */
- test-memory {
- description = "test-memory";
- pages-count = <4>;
- attributes = <0x7>; /* read-write-execute */
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0x7502000>;
+ attributes = <0x3>; /* read-write */
};
- };
- device-regions {
- compatible = "arm,ffa-manifest-device-regions";
-
- test-reg {
- /* Dummy values */
- base-address = <0x00000000 0x25000000>;
- pages-count = <16>;
- attributes = <0x3>; /* read-write */
- reg = <0x10000008 0x00000001 1>;
- smmu-id = <1>;
- stream-ids = <0x0 0x1>;
- interrupts = <0x2 0x3>,
- <0x4 0x5>;
+ /* Memory to be shared in memory sharing tests. */
+ share-memory-ns {
+ description = "NS share memory";
+ pages-count = <1>;
+ base-address = <0x00008800 0x80001000>;
+ attributes = <0xb>; /* NS / read-write */
};
};
-
};
diff --git a/spm/cactus/plat/arm/fvp/fdts/cactus.dts b/spm/cactus/plat/arm/fvp/fdts/cactus.dts
index 1c28fde49..3effb39d7 100644
--- a/spm/cactus/plat/arm/fvp/fdts/cactus.dts
+++ b/spm/cactus/plat/arm/fvp/fdts/cactus.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,8 +15,8 @@
/* Properties */
description = "Base-1";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0xb4b5671e 0x4a904fe1 0xb81ffb13 0xdae1dacb>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
id = <1>;
auxiliary-id = <0xae>;
stream-endpoint-ids = <0 1 2 3>;
@@ -24,14 +24,21 @@
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0x7000000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00002000>;
xlat-granule = <0>; /* 4KiB */
boot-order = <0>;
- messaging-method = <3>; /* Direct messaging with managed exit */
- run-time-model = <0>; /* Run to completion */
+ messaging-method = <3>; /* Direct messaging only */
+ ns-interrupts-action = <1>; /* Managed exit is supported */
+ notification-support; /* Support receipt of notifications. */
/* Boot protocol */
- gp-register-num = <0x0>;
+ gp-register-num = <0>;
+
+ /* Boot Info */
+ boot-info {
+ compatible = "arm,ffa-manifest-boot-info";
+ ffa_manifest;
+ };
rx_tx-info {
compatible = "arm,ffa-manifest-rx_tx-buffer";
@@ -56,11 +63,12 @@
attributes = <0x3>; /* read-write */
};
- /* Without optional base-address */
- test-memory {
- description = "test-memory";
- pages-count = <4>;
- attributes = <0x7>; /* read-write-execute */
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0x7500000>;
+ attributes = <0x3>; /* read-write */
};
/*
@@ -72,6 +80,9 @@
pages-count = <4>;
base-address = <0x00000000 0x7400000>;
attributes = <0x3>; /* read-write */
+ smmu-id = <0>;
+ stream-ids = <0x0 0x1>;
+ stream-ids-access-permissions = <0x3 0x3>;
};
smmuv3-memcpy-dst {
@@ -79,18 +90,25 @@
pages-count = <4>;
base-address = <0x00000000 0x7404000>;
attributes = <0x3>; /* read-write */
+ smmu-id = <0>;
+ stream-ids = <0x0 0x1>;
+ stream-ids-access-permissions = <0x3 0x3>;
+ };
+
+ smmuv3-ns-region {
+ description = "smmuv3-memcpy-ns-region";
+ pages-count = <8>;
+ base-address = <0x0 0x90000000>;
+ attributes = <0xb>; /* ns-read-write */
+ smmu-id = <0>;
+ stream-ids = <0x0 0x1>;
+ stream-ids-access-permissions = <0xb 0xb>;
};
};
device-regions {
compatible = "arm,ffa-manifest-device-regions";
- uart2 {
- base-address = <0x00000000 0x1c0b0000>;
- pages-count = <1>;
- attributes = <0x3>; /* read-write */
- };
-
smmuv3-testengine {
/*
* SMMUv3TestEngine is a DMA IP modeled in the
@@ -105,18 +123,26 @@
stream-ids = <0x0 0x1>;
};
- test-reg {
- /* Dummy Values */
- base-address = <0x00000000 0x22000000>;
- pages-count = <64>;
+ sec_wdog {
+ /* SP805 Trusted Watchdog Module */
+ base-address = <0x00000000 0x2a490000>;
+ pages-count = <32>; /* Two 64KB pages */
attributes = <0x3>; /* read-write */
- reg = <0x10000008 0x00000001 1>;
- smmu-id = <1>;
- stream-ids = <0x0 0x1>;
- interrupts = <0x2 0x3>,
- <0x4 0x5>;
- exclusive-access;
+ interrupts = <56 0x900>;
};
- };
+ espi_test_node {
+ /*
+ * A dummy device region node representing a fake
+ * peripheral. Explicitly used to specify properties of
+ * interrupt 5000, in the extended SPI range, used for
+ * testing purposes. All the properties below except
+ * `interrupts` are irrelevant.
+ */
+ base-address = <0x00000000 0x1c130000>;
+ pages-count = <1>; /* One 4KB page */
+ attributes = <0x1>; /* read-only */
+ interrupts = <5000 0x900>;
+ };
+ };
};
diff --git a/spm/cactus/plat/arm/fvp/include/cactus_platform_def.h b/spm/cactus/plat/arm/fvp/include/cactus_platform_def.h
deleted file mode 100644
index 8940c83f5..000000000
--- a/spm/cactus/plat/arm/fvp/include/cactus_platform_def.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <platform_def.h>
-
-#ifndef CACTUS_PLATFORM_DEF_H
-#define CACTUS_PLATFORM_DEF_H
-
-#define PLAT_ARM_DEVICE0_BASE DEVICE0_BASE
-#define PLAT_ARM_DEVICE0_SIZE DEVICE0_SIZE
-
-#define CACTUS_PL011_UART_BASE PL011_UART2_BASE
-#define CACTUS_PL011_UART_CLK_IN_HZ PL011_UART2_CLK_IN_HZ
-
-#define PLAT_CACTUS_RX_BASE ULL(0x7300000)
-#define PLAT_CACTUS_CORE_COUNT (8U)
-
-/* Scratch memory used for SMMUv3 driver testing purposes in Cactus SP */
-#define PLAT_CACTUS_MEMCPY_BASE ULL(0x7400000)
-#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
-
-#define CACTUS_PRIMARY_EC_COUNT (8U)
-#define CACTUS_SECONDARY_EC_COUNT (8U)
-#define CACTUS_TERTIARY_EC_COUNT (1U)
-
-/* Base address of user and PRIV frames in SMMUv3TestEngine */
-#define USR_BASE_FRAME ULL(0x2BFE0000)
-#define PRIV_BASE_FRAME ULL(0x2BFF0000)
-
-#endif /* CACTUS_PLATFORM_DEF_H */
diff --git a/spm/cactus/plat/arm/fvp/include/sp_platform_def.h b/spm/cactus/plat/arm/fvp/include/sp_platform_def.h
new file mode 100644
index 000000000..bb57ce86a
--- /dev/null
+++ b/spm/cactus/plat/arm/fvp/include/sp_platform_def.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains common defines for a secure partition. The make scripts
+ * select the correct platform_def.h header according to the secure partition
+ * and platform being built.
+ */
+
+#ifndef SP_PLATFORM_DEF_H
+#define SP_PLATFORM_DEF_H
+
+#include <platform_def.h>
+
+#define PLAT_SP_RX_BASE ULL(0x7300000)
+#define PLAT_SP_CORE_COUNT U(8)
+
+#define PLAT_ARM_DEVICE0_BASE DEVICE0_BASE
+#define PLAT_ARM_DEVICE0_SIZE DEVICE0_SIZE
+
+/* Scratch memory used for SMMUv3 driver testing purposes in Cactus SP */
+#define PLAT_CACTUS_MEMCPY_BASE ULL(0x7400000)
+#define PLAT_CACTUS_NS_MEMCPY_BASE ULL(0x90000000)
+#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
+
+/* Base address of user and PRIV frames in SMMUv3TestEngine */
+#define USR_BASE_FRAME ULL(0x2BFE0000)
+#define PRIV_BASE_FRAME ULL(0x2BFF0000)
+
+/* Base address for memory sharing tests. */
+#define CACTUS_SP1_MEM_SHARE_BASE 0x7500000
+#define CACTUS_SP2_MEM_SHARE_BASE 0x7501000
+#define CACTUS_SP3_MEM_SHARE_BASE 0x7502000
+#define CACTUS_SP3_NS_MEM_SHARE_BASE 0x880080001000ULL
+
+#endif /* SP_PLATFORM_DEF_H */
diff --git a/spm/cactus/plat/arm/tc0/fdts/cactus-secondary.dts b/spm/cactus/plat/arm/tc/fdts/cactus-secondary.dts
index 5b90eb8ff..839fb8228 100644
--- a/spm/cactus/plat/arm/tc0/fdts/cactus-secondary.dts
+++ b/spm/cactus/plat/arm/tc/fdts/cactus-secondary.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,22 +15,20 @@
/* Properties */
description = "cactus-2";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0xd1582309 0xf02347b9 0x827c4464 0xf5578fc8>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x092358d1 0xb94723f0 0x64447c82 0xc88f57f5>;
id = <2>;
execution-ctx-count = <8>;
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0xfe100000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00004000>;
xlat-granule = <0>; /* 4KiB */
- boot-order = <0>;
- messaging-method = <0>; /* Direct messaging only */
+ boot-order = <1>;
+ messaging-method = <3>; /* Direct messaging only */
+ notification-support; /* Support receipt of notifications. */
run-time-model = <1>; /* Run to completion */
- /* Boot protocol */
- gp-register-num = <0x0>;
-
rx_tx-info {
compatible = "arm,ffa-manifest-rx_tx-buffer";
rx-buffer = <&rxbuffer>;
@@ -53,5 +51,13 @@
base-address = <0x00000000 0xfe303000>;
attributes = <0x3>; /* read-write */
};
+
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0xfe501000>;
+ attributes = <0x3>; /* read-write */
+ };
};
};
diff --git a/spm/cactus/plat/arm/tc0/fdts/cactus-tertiary.dts b/spm/cactus/plat/arm/tc/fdts/cactus-tertiary.dts
index 3b50530e3..1db2c1e1f 100644
--- a/spm/cactus/plat/arm/tc0/fdts/cactus-tertiary.dts
+++ b/spm/cactus/plat/arm/tc/fdts/cactus-tertiary.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,30 +15,29 @@
/* Properties */
description = "cactus-3";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0x79b55c73 0x1d8c44b9 0x859361e1 0x770ad8d2>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x735cb579 0xb9448c1d 0xe1619385 0xd2d80a77>;
id = <3>;
execution-ctx-count = <1>;
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0xfe200000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00004000>;
xlat-granule = <0>; /* 4KiB */
- boot-order = <0>;
- messaging-method = <0>; /* Direct messaging only */
+ boot-order = <2>;
+ messaging-method = <3>; /* Direct messaging only */
+ notification-support; /* Support receipt of notifications. */
run-time-model = <1>; /* Run to completion */
- /* Boot protocol */
- gp-register-num = <0x0>;
-
memory-regions {
compatible = "arm,ffa-manifest-memory-regions";
- /* Without optional base-address */
- test-memory {
- description = "test-memory";
- pages-count = <4>;
- attributes = <0x7>; /* read-write-execute */
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
+ pages-count = <1>;
+ base-address = <0x00000000 0xfe502000>;
+ attributes = <0x3>; /* read-write */
};
};
diff --git a/spm/cactus/plat/arm/tc0/fdts/cactus.dts b/spm/cactus/plat/arm/tc/fdts/cactus.dts
index bd7c54654..31b9e8e88 100644
--- a/spm/cactus/plat/arm/tc0/fdts/cactus.dts
+++ b/spm/cactus/plat/arm/tc/fdts/cactus.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
@@ -15,21 +15,29 @@
/* Properties */
description = "cactus-1";
- ffa-version = <0x00010000>; /* 31:16 - Major, 15:0 - Minor */
- uuid = <0xb4b5671e 0x4a904fe1 0xb81ffb13 0xdae1dacb>;
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0x1e67b5b4 0xe14f904a 0x13fb1fb8 0xcbdae1da>;
id = <1>;
execution-ctx-count = <8>;
exception-level = <2>; /* S-EL1 */
execution-state = <0>; /* AARCH64 */
load-address = <0xfe000000>;
- entrypoint-offset = <0x00001000>;
+ entrypoint-offset = <0x00002000>;
xlat-granule = <0>; /* 4KiB */
boot-order = <0>;
- messaging-method = <0>; /* Direct messaging only */
+ messaging-method = <3>; /* Direct messaging only */
+ notification-support; /* Support receipt of notifications. */
+ managed-exit; /* Managed exit supported */
run-time-model = <1>; /* Run to completion */
/* Boot protocol */
- gp-register-num = <0x0>;
+ gp-register-num = <0>;
+
+ /* Boot Info */
+ boot-info {
+ compatible = "arm,ffa-manifest-boot-info";
+ ffa_manifest;
+ };
rx_tx-info {
compatible = "arm,ffa-manifest-rx_tx-buffer";
@@ -53,13 +61,12 @@
base-address = <0x00000000 0xfe301000>;
attributes = <0x3>; /* read-write */
};
- };
- device-regions {
- compatible = "arm,ffa-manifest-device-regions";
- uart2 {
- base-address = <0x00000000 0x7FF80000>;
+ /* Memory to be shared in memory sharing tests. */
+ share-memory {
+ description = "share-memory";
pages-count = <1>;
+ base-address = <0x00000000 0xfe500000>;
attributes = <0x3>; /* read-write */
};
};
diff --git a/spm/cactus/plat/arm/tc/include/sp_platform_def.h b/spm/cactus/plat/arm/tc/include/sp_platform_def.h
new file mode 100644
index 000000000..b3a3514ab
--- /dev/null
+++ b/spm/cactus/plat/arm/tc/include/sp_platform_def.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains common defines for a secure partition. The make scripts
+ * select the correct platform_def.h header according to the secure partition
+ * and platform being built.
+ */
+
+#ifndef SP_PLATFORM_DEF_H
+#define SP_PLATFORM_DEF_H
+
+#include <platform_def.h>
+
+#define PLAT_SP_RX_BASE ULL(0xfe300000)
+#define PLAT_SP_CORE_COUNT U(8)
+
+#define PLAT_ARM_DEVICE0_BASE TC_DEVICE0_BASE
+#define PLAT_ARM_DEVICE0_SIZE TC_DEVICE0_SIZE
+
+/* Scratch memory used for SMMUv3 driver testing purposes in Cactus SP */
+/* SMMUv3 tests are disabled for TC platform */
+#define PLAT_CACTUS_MEMCPY_BASE ULL(0xfe400000)
+#define PLAT_CACTUS_NS_MEMCPY_BASE ULL(0x90000000)
+#define PLAT_CACTUS_MEMCPY_RANGE ULL(0x8000)
+
+/* Base address of user and PRIV frames in SMMUv3TestEngine */
+/* SMMUv3 tests are disabled for TC platform */
+#define USR_BASE_FRAME ULL(0x0)
+#define PRIV_BASE_FRAME ULL(0x0)
+
+/* Base address for memory sharing tests. */
+#define CACTUS_SP1_MEM_SHARE_BASE 0xfe500000
+#define CACTUS_SP2_MEM_SHARE_BASE 0xfe501000
+#define CACTUS_SP3_MEM_SHARE_BASE 0xfe502000
+#define CACTUS_SP3_NS_MEM_SHARE_BASE 0x880080001000ULL
+
+#endif /* SP_PLATFORM_DEF_H */
diff --git a/spm/cactus/plat/arm/tc/platform.mk b/spm/cactus/plat/arm/tc/platform.mk
new file mode 100644
index 000000000..f3c8b3ee7
--- /dev/null
+++ b/spm/cactus/plat/arm/tc/platform.mk
@@ -0,0 +1,17 @@
+#
+# Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TC_CACTUS_BASE = spm/cactus/plat/arm/tc
+
+PLAT_INCLUDES += -I${TC_CACTUS_BASE}/include/
+
+# Add the FDT source
+CACTUS_DTS = ${TC_CACTUS_BASE}/fdts/cactus.dts
+
+# List of FDTS to copy
+FDTS_CP_LIST = ${TC_CACTUS_BASE}/fdts/cactus.dts
+FDTS_CP_LIST += ${TC_CACTUS_BASE}/fdts/cactus-secondary.dts
+FDTS_CP_LIST += ${TC_CACTUS_BASE}/fdts/cactus-tertiary.dts
diff --git a/spm/cactus/plat/arm/tc0/include/cactus_platform_def.h b/spm/cactus/plat/arm/tc0/include/cactus_platform_def.h
deleted file mode 100644
index 42dd29150..000000000
--- a/spm/cactus/plat/arm/tc0/include/cactus_platform_def.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <platform_def.h>
-
-#ifndef CACTUS_PLATFORM_DEF_H
-#define CACTUS_PLATFORM_DEF_H
-
-#define PLAT_ARM_DEVICE0_BASE TC0_DEVICE0_BASE
-#define PLAT_ARM_DEVICE0_SIZE TC0_DEVICE0_SIZE
-
-#define CACTUS_PL011_UART_BASE PL011_UART1_BASE
-#define CACTUS_PL011_UART_CLK_IN_HZ PL011_UART1_CLK_IN_HZ
-
-#define PLAT_CACTUS_RX_BASE ULL(0xfe300000)
-
-#define CACTUS_PRIMARY_EC_COUNT (8U)
-#define CACTUS_SECONDARY_EC_COUNT (8U)
-#define CACTUS_TERTIARY_EC_COUNT (1U)
-
-#endif /* CACTUS_PLATFORM_DEF_H */
diff --git a/spm/cactus/plat/arm/tc0/platform.mk b/spm/cactus/plat/arm/tc0/platform.mk
deleted file mode 100644
index 3bc245e76..000000000
--- a/spm/cactus/plat/arm/tc0/platform.mk
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-TC0_CACTUS_BASE = spm/cactus/plat/arm/tc0
-
-PLAT_INCLUDES += -I${TC0_CACTUS_BASE}/include/
-
-# Add the FDT source
-CACTUS_DTS = ${TC0_CACTUS_BASE}/fdts/cactus.dts
-
-# List of FDTS to copy
-FDTS_CP_LIST = ${TC0_CACTUS_BASE}/fdts/cactus.dts
-FDTS_CP_LIST += ${TC0_CACTUS_BASE}/fdts/cactus-secondary.dts
-FDTS_CP_LIST += ${TC0_CACTUS_BASE}/fdts/cactus-tertiary.dts
diff --git a/spm/cactus_mm/cactus_mm.mk b/spm/cactus_mm/cactus_mm.mk
index 3156c1cff..9d98d6265 100644
--- a/spm/cactus_mm/cactus_mm.mk
+++ b/spm/cactus_mm/cactus_mm.mk
@@ -59,9 +59,6 @@ $(eval $(call add_define,CACTUS_MM_DEFINES,ARM_ARCH_MINOR))
$(eval $(call add_define,CACTUS_MM_DEFINES,DEBUG))
$(eval $(call add_define,CACTUS_MM_DEFINES,ENABLE_BTI))
$(eval $(call add_define,CACTUS_MM_DEFINES,ENABLE_PAUTH))
-$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_CLUSTER_COUNT))
-$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
-$(eval $(call add_define,CACTUS_MM_DEFINES,FVP_MAX_PE_PER_CPU))
$(eval $(call add_define,CACTUS_MM_DEFINES,LOG_LEVEL))
$(eval $(call add_define,CACTUS_MM_DEFINES,PLAT_${PLAT}))
diff --git a/spm/cactus/cactus_debug.c b/spm/common/sp_debug.c
index 30a25278d..9c91c5603 100644
--- a/spm/cactus/cactus_debug.c
+++ b/spm/common/sp_debug.c
@@ -1,20 +1,38 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <drivers/arm/pl011.h>
#include <drivers/console.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <sp_debug.h>
#include <spm_helpers.h>
-#include "cactus.h"
-
static int (*putc_impl)(int);
static int putc_hypcall(int c)
{
- spm_debug_log((char)c);
+ hvc_args args = {
+ .fid = FFA_CONSOLE_LOG_SMC32,
+ .arg1 = 1,
+ .arg2 = c
+ };
+
+ (void)tftf_hvc(&args);
+ return c;
+}
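+
+/*
+ * FFA_CONSOLE_LOG takes the character count in arg1 and the characters packed
+ * into the following argument registers; here a single character is emitted
+ * per call.
+ */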
+static int putc_ffacall(int c)
+{
+ struct ffa_value args = {
+ .fid = FFA_CONSOLE_LOG_SMC32,
+ .arg1 = 1,
+ .arg2 = c
+ };
+
+ ffa_service_call(&args);
return c;
}
@@ -30,10 +48,12 @@ void set_putc_impl(enum stdout_route route)
{
switch (route) {
- case HVC_CALL_AS_STDOUT:
+ case FFA_HVC_CALL_AS_STDOUT:
putc_impl = putc_hypcall;
return;
-
+ case FFA_SVC_SMC_CALL_AS_STDOUT:
+ putc_impl = putc_ffacall;
+ return;
case PL011_AS_STDOUT:
default:
break;
diff --git a/spm/common/sp_debug.h b/spm/common/sp_debug.h
new file mode 100644
index 000000000..49bf5e79c
--- /dev/null
+++ b/spm/common/sp_debug.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+enum stdout_route {
+ PL011_AS_STDOUT = 0,
+ FFA_HVC_CALL_AS_STDOUT,
+ FFA_SVC_SMC_CALL_AS_STDOUT,
+};
+
+void set_putc_impl(enum stdout_route);
diff --git a/spm/common/sp_def.h b/spm/common/sp_def.h
new file mode 100644
index 000000000..2b26b68c3
--- /dev/null
+++ b/spm/common/sp_def.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SP_DEF_H
+#define SP_DEF_H
+
+#include <utils_def.h>
+#include <sp_platform_def.h>
+
+/*
+ * Layout of the Secure Partition image.
+ */
+
+/* Up to 2 MiB at an arbitrary address that doesn't overlap the devices. */
+#define SP_IMAGE_BASE ULL(0x1000)
+#define SP_IMAGE_SIZE ULL(0x200000)
+
+/* Memory reserved for stacks */
+#define SP_STACKS_SIZE ULL(0x1000)
+
+/*
+ * RX/TX buffers used by VMs in SPM for memory sharing.
+ * Each VM is allocated 2 pages, one for the RX buffer and one for the TX buffer.
+ */
+#define SP_RX_BASE PLAT_SP_RX_BASE
+#define SP_TX_BASE (SP_RX_BASE + PAGE_SIZE)
+#define SP_RX_TX_SIZE (PAGE_SIZE * 2)
+
+/*
+ * RX/TX buffer helpers.
+ */
+#define get_sp_rx_start(sp_id) (SP_RX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE))
+#define get_sp_rx_end(sp_id) (SP_RX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE) \
+ + PAGE_SIZE)
+#define get_sp_tx_start(sp_id) (SP_TX_BASE + \
+ (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE))
+#define get_sp_tx_end(sp_id) (SP_TX_BASE \
+ + (((sp_id & 0x7FFFU) - 1U) * SP_RX_TX_SIZE) \
+ + PAGE_SIZE)
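+
+/*
+ * For example, assuming 4 KiB pages and SP IDs with bit 15 set (0x8001,
+ * 0x8002, ...): the first SP gets its RX buffer at SP_RX_BASE and its TX
+ * buffer one page above, the second SP gets the next RX/TX pair at
+ * SP_RX_BASE + 2 * PAGE_SIZE, and so on.
+ */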
+
+#endif /* SP_DEF_H */
diff --git a/spm/common/sp_helpers.c b/spm/common/sp_helpers.c
index a6b6bc5d9..eeb0d1925 100644
--- a/spm/common/sp_helpers.c
+++ b/spm/common/sp_helpers.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,6 +13,10 @@
#include "sp_helpers.h"
+spinlock_t sp_handler_lock[NUM_VINT_ID];
+
+void (*sp_interrupt_handler[NUM_VINT_ID])(void);
+
uintptr_t bound_rand(uintptr_t min, uintptr_t max)
{
/*
@@ -58,16 +62,57 @@ void announce_test_end(const char *test_desc)
INFO("Test \"%s\" end.\n", test_desc);
}
-void sp_sleep(uint32_t ms)
+uint64_t sp_sleep_elapsed_time(uint32_t ms)
{
uint64_t timer_freq = read_cntfrq_el0();
VERBOSE("%s: Timer frequency = %llu\n", __func__, timer_freq);
VERBOSE("%s: Sleeping for %u milliseconds...\n", __func__, ms);
- uint64_t time1 = read_cntvct_el0();
+
+ uint64_t time1 = virtualcounter_read();
volatile uint64_t time2 = time1;
+
while ((time2 - time1) < ((ms * timer_freq) / 1000U)) {
- time2 = read_cntvct_el0();
+ time2 = virtualcounter_read();
+ }
+
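+ /* Convert the elapsed counter ticks back into milliseconds. */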
+ return ((time2 - time1) * 1000) / timer_freq;
+}
+
+void sp_sleep(uint32_t ms)
+{
+ (void)sp_sleep_elapsed_time(ms);
+}
+
+void sp_handler_spin_lock_init(void)
+{
+ for (uint32_t i = 0; i < NUM_VINT_ID; i++) {
+ init_spinlock(&sp_handler_lock[i]);
+ }
+}
+
+void sp_register_interrupt_handler(void (*handler)(void),
+ uint32_t interrupt_id)
+{
+ if (interrupt_id >= NUM_VINT_ID) {
+ ERROR("Cannot register handler for interrupt %u\n", interrupt_id);
+ panic();
}
+
+ spin_lock(&sp_handler_lock[interrupt_id]);
+ sp_interrupt_handler[interrupt_id] = handler;
+ spin_unlock(&sp_handler_lock[interrupt_id]);
+}
+
+void sp_unregister_interrupt_handler(uint32_t interrupt_id)
+{
+ if (interrupt_id >= NUM_VINT_ID) {
+ ERROR("Cannot unregister handler for interrupt %u\n", interrupt_id);
+ panic();
+ }
+
+ spin_lock(&sp_handler_lock[interrupt_id]);
+ sp_interrupt_handler[interrupt_id] = NULL;
+ spin_unlock(&sp_handler_lock[interrupt_id]);
}
diff --git a/spm/common/sp_helpers.h b/spm/common/sp_helpers.h
index 399200a01..a82924b8e 100644
--- a/spm/common/sp_helpers.h
+++ b/spm/common/sp_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -9,7 +9,14 @@
#include <stdint.h>
#include <tftf_lib.h>
-#include <ffa_helpers.h>
+#include <spm_common.h>
+#include <spinlock.h>
+
+/*
+ * Use the extended SPI interrupt ID range; Hafnium/SPMC maps virtual
+ * interrupts to physical interrupts 1:1.
+ */
+#define NUM_VINT_ID 5120
typedef struct {
u_register_t fid;
@@ -56,7 +63,26 @@ void announce_test_section_end(const char *test_sect_desc);
void announce_test_start(const char *test_desc);
void announce_test_end(const char *test_desc);
+/* Sleep for at least 'ms' milliseconds and return the elapsed time (in ms). */
+uint64_t sp_sleep_elapsed_time(uint32_t ms);
+
/* Sleep for at least 'ms' milliseconds. */
void sp_sleep(uint32_t ms);
+void sp_handler_spin_lock_init(void);
+
+/* Handler invoked by SP while processing interrupt. */
+extern void (*sp_interrupt_handler[NUM_VINT_ID])(void);
+
+/* Register the handler. */
+void sp_register_interrupt_handler(void (*handler)(void),
+ uint32_t interrupt_id);
+
+/* Un-register the handler. */
+void sp_unregister_interrupt_handler(uint32_t interrupt_id);
+
+void discover_managed_exit_interrupt_id(void);
+
+void register_maintenance_interrupt_handlers(void);
+
#endif /* SP_HELPERS_H */
diff --git a/spm/common/sp_tests/sp_test_ffa.c b/spm/common/sp_tests/sp_test_ffa.c
new file mode 100644
index 000000000..219b14947
--- /dev/null
+++ b/spm/common/sp_tests/sp_test_ffa.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "ffa_helpers.h"
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+
+#include <sp_def.h>
+#include <ffa_endpoints.h>
+#include <sp_helpers.h>
+#include <spm_helpers.h>
+#include <spm_common.h>
+#include <lib/libc/string.h>
+
+/* FFA version test helpers */
+#define FFA_MAJOR 1U
+#define FFA_MINOR 2U
+
+static uint32_t spm_version;
+
+static const struct ffa_uuid sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}, {IVY_UUID}, {EL3_SPMD_LP_UUID}
+ };
+
+static const struct ffa_partition_info ffa_expected_partition_info[] = {
+ /* Primary partition info */
+ {
+ .id = SP_ID(1),
+ .exec_context = PRIMARY_EXEC_CTX_COUNT,
+ .properties = (FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND |
+ FFA_PARTITION_NOTIFICATION),
+ .uuid = {PRIMARY_UUID}
+ },
+ /* Secondary partition info */
+ {
+ .id = SP_ID(2),
+ .exec_context = SECONDARY_EXEC_CTX_COUNT,
+ .properties = (FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND |
+ FFA_PARTITION_NOTIFICATION),
+ .uuid = {SECONDARY_UUID}
+ },
+ /* Tertiary partition info */
+ {
+ .id = SP_ID(3),
+ .exec_context = TERTIARY_EXEC_CTX_COUNT,
+ .properties = (FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND |
+ FFA_PARTITION_NOTIFICATION),
+ .uuid = {TERTIARY_UUID}
+ },
+ /* Ivy partition info */
+ {
+ .id = SP_ID(4),
+ .exec_context = IVY_EXEC_CTX_COUNT,
+ .properties = (FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_DIRECT_REQ_SEND),
+ .uuid = {IVY_UUID}
+ },
+ /* EL3 SPMD logical partition */
+ {
+ .id = SP_ID(0x7FC0),
+ .exec_context = EL3_SPMD_LP_EXEC_CTX_COUNT,
+ .properties = (FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_SEND),
+ .uuid = {EL3_SPMD_LP_UUID}
+ },
+};
+
+/*
+ * Test FFA_FEATURES interface.
+ */
+static void ffa_features_test(void)
+{
+ struct ffa_value ffa_ret;
+ unsigned int expected_ret;
+ const struct ffa_features_test *ffa_feature_test_target;
+ unsigned int i, test_target_size =
+ get_ffa_feature_test_target(&ffa_feature_test_target);
+ struct ffa_features_test test_target;
+
+ INFO("Test FFA_FEATURES.\n");
+
+ for (i = 0U; i < test_target_size; i++) {
+ test_target = ffa_feature_test_target[i];
+
+ ffa_ret = ffa_features_with_input_property(test_target.feature,
+ test_target.param);
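+ /*
+  * Features added after the FF-A version this image was compiled
+  * against are expected to return FFA_ERROR.
+  */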
+ expected_ret = FFA_VERSION_COMPILED
+ >= test_target.version_added ?
+ test_target.expected_ret : FFA_ERROR;
+
+ if (ffa_func_id(ffa_ret) != expected_ret) {
+ ERROR("Unexpected return: %x (expected %x)."
+ " FFA_FEATURES test: %s.\n",
+ ffa_func_id(ffa_ret), expected_ret,
+ test_target.test_name);
+ }
+
+ if (expected_ret == FFA_ERROR) {
+ if (ffa_error_code(ffa_ret) !=
+ FFA_ERROR_NOT_SUPPORTED) {
+ ERROR("Unexpected error code: %x (expected %x)."
+ " FFA_FEATURES test: %s.\n",
+ ffa_error_code(ffa_ret), FFA_ERROR_NOT_SUPPORTED,
+ test_target.test_name);
+ }
+ }
+ }
+}
+
+static void ffa_partition_info_wrong_test(void)
+{
+ const struct ffa_uuid uuid = { .uuid = {1} };
+ struct ffa_value ret = ffa_partition_info_get(uuid);
+
+ VERBOSE("%s: test request wrong UUID.\n", __func__);
+
+ expect(ffa_func_id(ret), FFA_ERROR);
+ expect(ffa_error_code(ret), FFA_ERROR_INVALID_PARAMETER);
+}
+
+static void ffa_partition_info_get_regs_test(void)
+{
+ struct ffa_value ret = { 0 };
+
+ VERBOSE("FF-A Partition Info regs interface tests\n");
+ ret = ffa_version(MAKE_FFA_VERSION(1, 2));
+ uint32_t version = ret.fid;
+
+ if (version == FFA_ERROR_NOT_SUPPORTED) {
+ ERROR("FFA_VERSION 1.2 not supported, skipping"
+ " FFA_PARTITION_INFO_GET_REGS test.\n");
+ return;
+ }
+
+ ret = ffa_features(FFA_PARTITION_INFO_GET_REGS_SMC64);
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR("FFA_PARTITION_INFO_GET_REGS not supported skipping tests.\n");
+ return;
+ }
+
+ expect(ffa_partition_info_regs_helper(sp_uuids[3],
+ &ffa_expected_partition_info[3], 1), true);
+ expect(ffa_partition_info_regs_helper(sp_uuids[2],
+ &ffa_expected_partition_info[2], 1), true);
+ expect(ffa_partition_info_regs_helper(sp_uuids[1],
+ &ffa_expected_partition_info[1], 1), true);
+ expect(ffa_partition_info_regs_helper(sp_uuids[0],
+ &ffa_expected_partition_info[0], 1), true);
+
+ /*
+ * Check partition information if EL3 SPMD logical partitions are
+ * supported. A successful call to partition_info_get_regs with the
+ * SPMD EL3 UUID indicates their presence (there is no spec-defined
+ * way to discover EL3 SPMD logical partitions). If the call fails
+ * with a "not supported" error, we assume they don't exist and skip
+ * further tests to avoid failures on platforms without EL3 SPMD
+ * logical partitions.
+ */
+ ret = ffa_partition_info_get_regs(sp_uuids[4], 0, 0);
+ if ((ffa_func_id(ret) == FFA_ERROR) &&
+ ((ffa_error_code(ret) == FFA_ERROR_NOT_SUPPORTED) ||
+ (ffa_error_code(ret) == FFA_ERROR_INVALID_PARAMETER))) {
+ INFO("Skipping register based EL3 SPMD Logical partition"
+ " discovery\n");
+ expect(ffa_partition_info_regs_helper(NULL_UUID,
+ ffa_expected_partition_info,
+ (ARRAY_SIZE(ffa_expected_partition_info) - 1)), true);
+ } else {
+ expect(ffa_partition_info_regs_helper(sp_uuids[4],
+ &ffa_expected_partition_info[4], 1), true);
+ expect(ffa_partition_info_regs_helper(NULL_UUID,
+ ffa_expected_partition_info,
+ ARRAY_SIZE(ffa_expected_partition_info)), true);
+ }
+}
+
+static void ffa_partition_info_get_test(struct mailbox_buffers *mb)
+{
+ INFO("Test FFA_PARTITION_INFO_GET.\n");
+
+ expect(ffa_partition_info_helper(mb, sp_uuids[2],
+ &ffa_expected_partition_info[2], 1), true);
+
+ expect(ffa_partition_info_helper(mb, sp_uuids[1],
+ &ffa_expected_partition_info[1], 1), true);
+
+ expect(ffa_partition_info_helper(mb, sp_uuids[0],
+ &ffa_expected_partition_info[0], 1), true);
+
+ /*
+ * TODO: ffa_partition_info_get_regs returns EL3 SPMD LP information
+ * but partition_info_get does not. Ignore the last entry, which is
+ * assumed to be the EL3 SPMD LP information. ffa_partition_info_get
+ * uses the rx/tx buffer and the SPMD does not support the use of
+ * rx/tx buffer to return SPMD logical partition information.
+ */
+ expect(ffa_partition_info_helper(mb, NULL_UUID,
+ ffa_expected_partition_info,
+ (ARRAY_SIZE(ffa_expected_partition_info) - 1)), true);
+
+ ffa_partition_info_wrong_test();
+}
+
+void ffa_version_test(void)
+{
+ struct ffa_value ret = ffa_version(MAKE_FFA_VERSION(FFA_MAJOR,
+ FFA_MINOR));
+
+ spm_version = (uint32_t)ret.fid;
+
+ bool ffa_version_compatible =
+ ((spm_version >> FFA_VERSION_MAJOR_SHIFT) == FFA_MAJOR &&
+ (spm_version & FFA_VERSION_MINOR_MASK) >= FFA_MINOR);
+
+ INFO("Test FFA_VERSION. Return %u.%u; Compatible: %i\n",
+ spm_version >> FFA_VERSION_MAJOR_SHIFT,
+ spm_version & FFA_VERSION_MINOR_MASK,
+ (int)ffa_version_compatible);
+
+ expect((int)ffa_version_compatible, (int)true);
+}
+
+void ffa_spm_id_get_test(void)
+{
+ if (spm_version >= MAKE_FFA_VERSION(1, 1)) {
+ struct ffa_value ret = ffa_spm_id_get();
+
+ expect(ffa_func_id(ret), FFA_SUCCESS_SMC32);
+
+ ffa_id_t spm_id = ffa_endpoint_id(ret);
+
+ INFO("Test FFA_SPM_ID_GET. Return: 0x%x\n", spm_id);
+
+ /*
+ * Check that the SPMC ID given in the fvp_spmc_manifest
+ * is returned.
+ */
+ expect(spm_id, SPMC_ID);
+ } else {
+ INFO("FFA_SPM_ID_GET not supported in this version of FF-A."
+ " Test skipped.\n");
+ }
+}
+
+void ffa_tests(struct mailbox_buffers *mb)
+{
+ const char *test_ffa = "FF-A setup and discovery";
+
+ announce_test_section_start(test_ffa);
+
+ ffa_features_test();
+ ffa_version_test();
+ ffa_spm_id_get_test();
+ ffa_partition_info_get_test(mb);
+ ffa_partition_info_get_regs_test();
+
+ announce_test_section_end(test_ffa);
+}
diff --git a/spm/common/sp_tests/sp_tests.h b/spm/common/sp_tests/sp_tests.h
new file mode 100644
index 000000000..10d3b9bcc
--- /dev/null
+++ b/spm/common/sp_tests/sp_tests.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2017-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CACTUS_TESTS_H
+#define CACTUS_TESTS_H
+
+#include <spm_common.h>
+
+/*
+ * Test functions
+ */
+
+void ffa_tests(struct mailbox_buffers *mb);
+
+#endif /* CACTUS_TESTS_H */
diff --git a/spm/common/spm_helpers.c b/spm/common/spm_helpers.c
index 2ccf3f7d5..b2a4709ba 100644
--- a/spm/common/spm_helpers.c
+++ b/spm/common/spm_helpers.c
@@ -21,16 +21,6 @@ uint32_t spm_interrupt_get(void)
return ret.ret0;
}
-void spm_debug_log(char c)
-{
- hvc_args args = {
- .fid = SPM_DEBUG_LOG,
- .arg1 = c
- };
-
- (void)tftf_hvc(&args);
-}
-
/**
* Hypervisor call to enable/disable SP delivery of a virtual interrupt of
* int_id value through the IRQ or FIQ vector (pin).
@@ -49,3 +39,33 @@ int64_t spm_interrupt_enable(uint32_t int_id, bool enable, enum interrupt_pin pi
return (int64_t)ret.ret0;
}
+
+/**
+ * Hypervisor call to drop the priority and de-activate a secure interrupt.
+ * Returns 0 on success, or -1 if passing an invalid interrupt id.
+ */
+int64_t spm_interrupt_deactivate(uint32_t vint_id)
+{
+ hvc_args args = {
+ .fid = SPM_INTERRUPT_DEACTIVATE,
+ .arg1 = vint_id, /* pint_id */
+ .arg2 = vint_id
+ };
+
+ hvc_ret_values ret = tftf_hvc(&args);
+
+ return (int64_t)ret.ret0;
+}
+
+/**
+ * Return vCPU index for the currently running vCPU.
+ * Virtual MPIDR holds the linear vCPU index information in lower bits.
+ * Keep only first 24 bits (mapping to Aff0/Aff1/Aff2).
+ * Omit Aff3, bit [31], U[30], MT[24].
+ */
+unsigned int spm_get_my_core_pos(void)
+{
+ uint64_t mpidr = read_mpidr_el1();
+
+ return (unsigned int)(mpidr & 0xffffff);
+}
diff --git a/spm/common/spm_helpers.h b/spm/common/spm_helpers.h
index 10f73161c..59cdaf124 100644
--- a/spm/common/spm_helpers.h
+++ b/spm/common/spm_helpers.h
@@ -13,7 +13,7 @@
/* Should match with IDs defined in SPM/Hafnium */
#define SPM_INTERRUPT_ENABLE (0xFF03)
#define SPM_INTERRUPT_GET (0xFF04)
-#define SPM_DEBUG_LOG (0xBD000000)
+#define SPM_INTERRUPT_DEACTIVATE (0xFF08)
/*
* Hypervisor Calls Wrappers
@@ -21,6 +21,8 @@
uint32_t spm_interrupt_get(void);
int64_t spm_interrupt_enable(uint32_t int_id, bool enable, enum interrupt_pin pin);
-void spm_debug_log(char c);
+int64_t spm_interrupt_deactivate(uint32_t vint_id);
+
+unsigned int spm_get_my_core_pos(void);
#endif /* SPMC_H */
diff --git a/spm/include/sp_res_desc_def.h b/spm/include/sp_res_desc_def.h
deleted file mode 100644
index 68df297a8..000000000
--- a/spm/include/sp_res_desc_def.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef SPM_RES_DESC_DEFS_H
-#define SPM_RES_DESC_DEFS_H
-
-#include <utils_def.h>
-
-/*******************************************************************************
- * Attribute Section
- ******************************************************************************/
-
-#define RD_ATTR_TYPE_UP_MIGRATABLE U(0)
-#define RD_ATTR_TYPE_UP_PINNED U(2)
-#define RD_ATTR_TYPE_MP U(1)
-
-#define RD_ATTR_RUNTIME_SEL0 U(0)
-#define RD_ATTR_RUNTIME_SEL1 U(1)
-
-#define RD_ATTR_INIT_ONLY U(0)
-#define RD_ATTR_RUNTIME U(1)
-
-#define RD_ATTR_PANIC_RESTART U(0)
-#define RD_ATTR_PANIC_ONESHOT U(1)
-
-#define RD_ATTR_XLAT_GRANULE_4KB U(0)
-#define RD_ATTR_XLAT_GRANULE_16KB U(1)
-#define RD_ATTR_XLAT_GRANULE_64KB U(2)
-
-/*******************************************************************************
- * Memory Region Section
- ******************************************************************************/
-
-#define RD_MEM_REGION_NAME_LEN U(32)
-
-#define RD_MEM_DEVICE U(0)
-#define RD_MEM_NORMAL_CODE U(1)
-#define RD_MEM_NORMAL_DATA U(2)
-#define RD_MEM_NORMAL_BSS U(3)
-#define RD_MEM_NORMAL_RODATA U(4)
-#define RD_MEM_NORMAL_SPM_SP_SHARED_MEM U(5)
-#define RD_MEM_NORMAL_CLIENT_SHARED_MEM U(6)
-#define RD_MEM_NORMAL_MISCELLANEOUS U(7)
-
-#define RD_MEM_MASK U(15)
-
-#define RD_MEM_IS_PIE (U(1) << 4)
-
-/*******************************************************************************
- * Notification Section
- ******************************************************************************/
-
-#define RD_NOTIF_TYPE_PLATFORM (U(0) << 31)
-#define RD_NOTIF_TYPE_INTERRUPT (U(1) << 31)
-
-#define RD_NOTIF_PLAT_ID_MASK U(0xFFFF)
-#define RD_NOTIF_PLAT_ID_SHIFT U(0)
-
-#define RD_NOTIF_PLATFORM(id) \
- (RD_NOTIF_TYPE_PLATFORM \
- | (((id) & RD_NOTIF_PLAT_ID_MASK) << RD_NOTIF_PLAT_ID_SHIFT))
-
-#define RD_NOTIF_IRQ_NUM_MASK U(0xFFFF)
-#define RD_NOTIF_IRQ_NUM_SHIFT U(0)
-#define RD_NOTIF_IRQ_PRIO_MASK U(0xFF)
-#define RD_NOTIF_IRQ_PRIO_SHIFT U(16)
-
-#define RD_NOTIF_IRQ_EDGE_FALLING U(0)
-#define RD_NOTIF_IRQ_EDGE_RISING U(2)
-#define RD_NOTIF_IRQ_LEVEL_LOW U(1)
-#define RD_NOTIF_IRQ_LEVEL_HIGH U(3)
-#define RD_NOTIF_IRQ_TRIGGER_SHIFT U(24)
-
-#define RD_NOTIF_IRQ(num, prio, trig) \
- (RD_NOTIF_TYPE_IRQ \
- | (((num) & RD_NOTIF_IRQ_NUM_MASK) << RD_NOTIF_IRQ_NUM_SHIFT) \
- | (((prio) & RD_NOTIF_IRQ_PRIO_MASK) << RD_NOTIF_IRQ_PRIO_SHIFT) \
- | (((trig) << RD_NOTIF_IRQ_TRIGGER_SHIFT)))
-
-/*******************************************************************************
- * Service Description Section
- ******************************************************************************/
-
-#define RD_SERV_ACCESS_SECURE (U(1) << 0)
-#define RD_SERV_ACCESS_EL3 (U(1) << 1)
-#define RD_SERV_ACCESS_NORMAL (U(1) << 2)
-
-#define RD_SERV_SUPPORT_BLOCKING (U(1) << 0)
-#define RD_SERV_SUPPORT_NON_BLOCKING (U(1) << 0)
-
-#endif /* SPM_RES_DESC_DEFS_H */
diff --git a/spm/ivy/aarch64/ivy_entrypoint.S b/spm/ivy/aarch64/ivy_entrypoint.S
deleted file mode 100644
index c6cb8b34b..000000000
--- a/spm/ivy/aarch64/ivy_entrypoint.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <ivy_def.h>
-#include <platform_def.h>
-
- .globl ivy_entrypoint
-
-.section .bss.stacks
- .balign CACHE_WRITEBACK_GRANULE
- .fill IVY_STACKS_SIZE
-stacks_end:
-
-func ivy_entrypoint
-
- /* Setup the stack pointer. */
- adr x0, stacks_end
- mov sp, x0
-
- /* And jump to the C entrypoint. */
- b ivy_main
-
-endfunc ivy_entrypoint
diff --git a/spm/ivy/app/aarch64/ivy_entrypoint.S b/spm/ivy/app/aarch64/ivy_entrypoint.S
new file mode 100644
index 000000000..062225c74
--- /dev/null
+++ b/spm/ivy/app/aarch64/ivy_entrypoint.S
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <ivy_def.h>
+#include <platform_def.h>
+
+ .globl ivy_entrypoint
+
+.section .bss.stacks
+ .balign CACHE_WRITEBACK_GRANULE
+ .fill IVY_STACKS_SIZE
+stacks_end:
+
+/* Call FFA_MEM_PERM_SET_32 to set the permissions of a given memory region. */
+.macro ffa_mem_perm_set start:req end:req perm:req
+ adrp x29, \start
+ add x29, x29, :lo12:\start
+
+ adrp x30, \end
+ add x30, x30, :lo12:\end
+
+ /* x30 = end - start */
+ sub x30, x30, x29
+ /* x28 = x30 >> 12 (number of pages) */
+ mov x28, #12
+ lsrv x28, x30, x28
+
+ /* 0x84000089 is function identifier for FFA_MEM_PERM_SET_32 */
+ mov w0, #0x89
+ movk w0, 0x8400, lsl #16
+ mov x1, x29 /* Base VA */
+ mov x2, x28 /* Page count */
+ mov w3, #\perm /* Memory permissions */
+ svc #0
+
+ /* 0x84000061 is function identifier for FFA_SUCCESS_32 */
+ mov w1, #0x61
+ movk w1, #0x8400, lsl #16
+ cmp w1, w0
+ b.ne .
+.endm
+
+func ivy_entrypoint
+
+ /* Setup the stack pointer. */
+ adr x0, stacks_end
+ mov sp, x0
+
+#if IVY_SHIM == 0
+ /* RODATA+DATA+BSS marked RW so relocations can succeed. */
+ ffa_mem_perm_set __RODATA_START__ __BSS_END__ 5
+
+ /* Relocate symbols */
+ivy_pie_fixup:
+ mov x0, #0x1000
+ mov x1, #IVY_IMAGE_SIZE
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+
+ /* Clear S-EL0 partition BSS */
+ adrp x0, __BSS_START__
+ adrp x2, __BSS_END__
+ sub x2, x2, x0
+ mov x1, xzr
+ bl memset
+
+ /* Then mark RODATA as RO */
+ ffa_mem_perm_set __RODATA_START__ __RODATA_END__ 7
+#endif /* IVY_SHIM == 0 */
+
+ /* And jump to the C entrypoint. */
+ b ivy_main
+
+endfunc ivy_entrypoint
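
For reference, a minimal sketch of the permission encoding assumed by the perm values 5 and 7
passed to ffa_mem_perm_set above (FFA_MEM_PERM_SET_32; the FF-A v1.1 encoding and the macro
names below are assumptions for illustration, not definitions from the tree):

    /* Assumed FFA_MEM_PERM_SET_32 permission encoding (FF-A v1.1). */
    #define FFA_MEM_PERM_DATA_RW	(1U << 0)	/* bits[1:0] = 0b01: read-write */
    #define FFA_MEM_PERM_DATA_RO	(3U << 0)	/* bits[1:0] = 0b11: read-only */
    #define FFA_MEM_PERM_INST_NX	(1U << 2)	/* bit[2]    = 1: non-executable */

    /* perm 5 == FFA_MEM_PERM_INST_NX | FFA_MEM_PERM_DATA_RW: writable while relocating. */
    /* perm 7 == FFA_MEM_PERM_INST_NX | FFA_MEM_PERM_DATA_RO: read-only once fixed up.   */
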
diff --git a/spm/ivy/app/ivy.h b/spm/ivy/app/ivy.h
new file mode 100644
index 000000000..a40f7e123
--- /dev/null
+++ b/spm/ivy/app/ivy.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef IVY_H
+#define IVY_H
+
+#include <stdint.h>
+
+/* Linker symbols used to figure out the memory layout of the S-EL1 shim. */
+extern uintptr_t __SHIM_TEXT_START__, __SHIM_TEXT_END__;
+#define SHIM_TEXT_START ((uintptr_t)&__SHIM_TEXT_START__)
+#define SHIM_TEXT_END ((uintptr_t)&__SHIM_TEXT_END__)
+
+extern uintptr_t __SHIM_RODATA_START__, __SHIM_RODATA_END__;
+#define SHIM_RODATA_START ((uintptr_t)&__SHIM_RODATA_START__)
+#define SHIM_RODATA_END ((uintptr_t)&__SHIM_RODATA_END__)
+
+extern uintptr_t __SHIM_DATA_START__, __SHIM_DATA_END__;
+#define SHIM_DATA_START ((uintptr_t)&__SHIM_DATA_START__)
+#define SHIM_DATA_END ((uintptr_t)&__SHIM_DATA_END__)
+
+extern uintptr_t __SHIM_BSS_START__, __SHIM_BSS_END__;
+#define SHIM_BSS_START ((uintptr_t)&__SHIM_BSS_START__)
+#define SHIM_BSS_END ((uintptr_t)&__SHIM_BSS_END__)
+
+/* Linker symbols used to figure out the memory layout of Ivy (S-EL0). */
+extern uintptr_t __TEXT_START__, __TEXT_END__;
+#define IVY_TEXT_START ((uintptr_t)&__TEXT_START__)
+#define IVY_TEXT_END ((uintptr_t)&__TEXT_END__)
+
+extern uintptr_t __RODATA_START__, __RODATA_END__;
+#define IVY_RODATA_START ((uintptr_t)&__RODATA_START__)
+#define IVY_RODATA_END ((uintptr_t)&__RODATA_END__)
+
+extern uintptr_t __DATA_START__, __DATA_END__;
+#define IVY_DATA_START ((uintptr_t)&__DATA_START__)
+#define IVY_DATA_END ((uintptr_t)&__DATA_END__)
+
+extern uintptr_t __BSS_START__, __BSS_END__;
+#define IVY_BSS_START ((uintptr_t)&__BSS_START__)
+#define IVY_BSS_END ((uintptr_t)&__BSS_END__)
+
+#endif /* IVY_H */
diff --git a/spm/ivy/ivy_def.h b/spm/ivy/app/ivy_def.h
index 729c46dae..815a59e57 100644
--- a/spm/ivy/ivy_def.h
+++ b/spm/ivy/app/ivy_def.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -12,7 +12,7 @@
*/
/* Up to 2 MiB at an arbitrary address that doesn't overlap the devices. */
-#define IVY_IMAGE_BASE ULL(0x90000000)
+#define IVY_IMAGE_BASE ULL(0x1000)
#define IVY_IMAGE_SIZE ULL(0x200000)
/* Memory reserved for stacks */
diff --git a/spm/ivy/app/ivy_main.c b/spm/ivy/app/ivy_main.c
new file mode 100644
index 000000000..1e2cd6ab5
--- /dev/null
+++ b/spm/ivy/app/ivy_main.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <errno.h>
+#include <ffa_helpers.h>
+#include <sp_debug.h>
+#include <sp_helpers.h>
+
+#include "ivy.h"
+#include "sp_tests.h"
+
+/* Host machine information injected by the build system in the ELF file. */
+extern const char build_message[];
+extern const char version_string[];
+
+void __dead2 ivy_main(void)
+{
+ struct ffa_value ret;
+ ffa_id_t my_id;
+ struct mailbox_buffers mb;
+
+ set_putc_impl(FFA_SVC_SMC_CALL_AS_STDOUT);
+
+ /* Get FF-A id. */
+ ret = ffa_id_get();
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR("Cannot get FF-A id.\n");
+ panic();
+ }
+ my_id = ffa_endpoint_id(ret);
+
+ NOTICE("Booting Secure Partition (ID: %x)\n", my_id);
+ NOTICE("%s\n", build_message);
+ NOTICE("%s\n", version_string);
+
+init:
+ VERBOSE("Mapping RXTX Regions\n");
+ CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC32) {
+ ERROR("Failed to map RXTX buffers. Error %x\n",
+ ffa_error_code(ret));
+ panic();
+ }
+
+ ffa_tests(&mb);
+
+ ret = ffa_msg_wait();
+
+ while (1) {
+ if (ffa_func_id(ret) != FFA_MSG_SEND_DIRECT_REQ_SMC32) {
+ ERROR("unknown FF-A request %x\n", ffa_func_id(ret));
+ goto init;
+ }
+
+ VERBOSE("Received request: %lx\n", ret.arg3);
+
+ ret = ffa_msg_send_direct_resp32(my_id, ffa_dir_msg_source(ret),
+ 0, 0, 0, 0, 0);
+ }
+}
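
A minimal sketch of how a normal-world test could drive the message loop above. The request
helper and the FFA_MSG_SEND_DIRECT_RESP_SMC32 constant are assumed by symmetry with the
response helper and request constant already used in ivy_main(); ivy_id is a placeholder for
the endpoint id obtained through partition discovery:

    #include <ffa_helpers.h>
    #include <test_helpers.h>

    /* Sketch only: send one direct request to Ivy and expect an (empty) direct response. */
    static test_result_t poke_ivy(ffa_id_t own_id, ffa_id_t ivy_id)
    {
    	struct ffa_value ret;

    	ret = ffa_msg_send_direct_req32(own_id, ivy_id, 0xcafe, 0, 0, 0, 0);

    	/* ivy_main() answers every direct request with ffa_msg_send_direct_resp32(). */
    	if (ffa_func_id(ret) != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
    		return TEST_RESULT_FAIL;
    	}

    	return TEST_RESULT_SUCCESS;
    }
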
diff --git a/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel0.dts b/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel0.dts
new file mode 100644
index 000000000..7bf7d914d
--- /dev/null
+++ b/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel0.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * running in S-EL0 on top of Hafnium with VHE enabled (no S-EL1 shim included).
+ */
+
+
+/dts-v1/;
+
+/ {
+ compatible = "arm,ffa-manifest-1.0";
+
+ /* Properties */
+ description = "ivy-sel0-fvp";
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0xd883baea 0xaf4eafba 0xfdf74481 0xa744e5cb>;
+ execution-ctx-count = <1>;
+ exception-level = <1>; /* S-EL0 */
+ execution-state = <0>; /* AARCH64 */
+ load-address = <0x7600000>;
+ entrypoint-offset = <0x00004000>;
+ boot-order = <3>;
+ messaging-method = <3>; /* Direct messaging only */
+
+ /* Boot protocol */
+ gp-register-num = <0x0>;
+};
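
Assuming standard FF-A manifest semantics, the entry point implied by the two address fields
above is simply load-address plus entrypoint-offset; a one-line check of the arithmetic
(the variable name is illustrative only):

    /* Sketch: entry point implied by the manifest fields above. */
    unsigned long ivy_sel0_fvp_entry = 0x7600000UL + 0x00004000UL;	/* = 0x7604000 */
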
diff --git a/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel1.dts b/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel1.dts
new file mode 100644
index 000000000..a4d26ef2b
--- /dev/null
+++ b/spm/ivy/app/plat/arm/fvp/fdts/ivy-sel1.dts
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * intended to run at S-EL0 behind an S-EL1 shim, on Hafnium without
+ * VHE enabled.
+ */
+
+
+/dts-v1/;
+
+/ {
+ compatible = "arm,ffa-manifest-1.0";
+
+ /* Properties */
+ description = "ivy-sel1-fvp";
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0xd883baea 0xaf4eafba 0xfdf74481 0xa744e5cb>;
+ execution-ctx-count = <1>;
+ exception-level = <2>; /* S-EL1 */
+ execution-state = <0>; /* AARCH64 */
+ load-address = <0x7600000>;
+ entrypoint-offset = <0x00004000>;
+ boot-order = <3>;
+ messaging-method = <3>; /* Direct messaging only */
+ /* Non-secure interrupts are signaled by default. */
+
+ /* Boot protocol */
+ gp-register-num = <0x0>;
+};
diff --git a/spm/ivy/app/plat/arm/fvp/include/sp_platform_def.h b/spm/ivy/app/plat/arm/fvp/include/sp_platform_def.h
new file mode 100644
index 000000000..b17f006f7
--- /dev/null
+++ b/spm/ivy/app/plat/arm/fvp/include/sp_platform_def.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains common defines for a secure partition. The correct
+ * platform_def.h header file is selected according to the secure partition
+ * and platform being built using the make scripts.
+ */
+
+#ifndef SP_PLATFORM_DEF_H
+#define SP_PLATFORM_DEF_H
+
+#define PLAT_SP_RX_BASE ULL(0x7300000)
+
+#endif /* SP_PLATFORM_DEF_H */
diff --git a/spm/ivy/app/plat/arm/fvp/platform.mk b/spm/ivy/app/plat/arm/fvp/platform.mk
new file mode 100644
index 000000000..3b9be3373
--- /dev/null
+++ b/spm/ivy/app/plat/arm/fvp/platform.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+FVP_IVY_BASE = spm/ivy/app/plat/arm/fvp
+
+PLAT_INCLUDES += -I${FVP_IVY_BASE}/include/
+
+# Add the FDT source
+ifeq ($(IVY_SHIM),0)
+IVY_DTS = ${FVP_IVY_BASE}/fdts/ivy-sel0.dts
+else
+IVY_DTS = ${FVP_IVY_BASE}/fdts/ivy-sel1.dts
+endif
+
+# List of FDTS to copy
+FDTS_CP_LIST = $(IVY_DTS)
diff --git a/spm/ivy/app/plat/arm/tc/fdts/ivy-sel0.dts b/spm/ivy/app/plat/arm/tc/fdts/ivy-sel0.dts
new file mode 100644
index 000000000..7d333a6c3
--- /dev/null
+++ b/spm/ivy/app/plat/arm/tc/fdts/ivy-sel0.dts
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * running in S-EL0 on top of Hafnium with VHE enabled (no S-EL1 shim included).
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "arm,ffa-manifest-1.0";
+
+ /* Properties */
+ description = "ivy-sel0-tc";
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0xd883baea 0xaf4eafba 0xfdf74481 0xa744e5cb>;
+ execution-ctx-count = <1>;
+ exception-level = <1>; /* S-EL0 */
+ execution-state = <0>; /* AARCH64 */
+ load-address = <0xfe280000>;
+ entrypoint-offset = <0x00004000>;
+ boot-order = <3>;
+ messaging-method = <3>; /* Direct messaging only */
+
+ /* Boot protocol */
+ gp-register-num = <0x0>;
+};
diff --git a/spm/ivy/app/plat/arm/tc/fdts/ivy-sel1.dts b/spm/ivy/app/plat/arm/tc/fdts/ivy-sel1.dts
new file mode 100644
index 000000000..6503e1556
--- /dev/null
+++ b/spm/ivy/app/plat/arm/tc/fdts/ivy-sel1.dts
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file is a Partition Manifest (PM) for a minimal Secure Partition (SP)
+ * intended to run at S-EL0 behind an S-EL1 shim, on Hafnium without
+ * VHE enabled.
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "arm,ffa-manifest-1.0";
+
+ /* Properties */
+ description = "ivy-sel1-tc";
+ ffa-version = <0x00010002>; /* 31:16 - Major, 15:0 - Minor */
+ uuid = <0xd883baea 0xaf4eafba 0xfdf74481 0xa744e5cb>;
+ execution-ctx-count = <1>;
+ exception-level = <2>; /* S-EL1 */
+ execution-state = <0>; /* AARCH64 */
+ load-address = <0xfe280000>;
+ entrypoint-offset = <0x00004000>;
+ boot-order = <3>;
+ messaging-method = <3>; /* Direct messaging only */
+
+ /* Boot protocol */
+ gp-register-num = <0x0>;
+};
diff --git a/spm/ivy/app/plat/arm/tc/include/sp_platform_def.h b/spm/ivy/app/plat/arm/tc/include/sp_platform_def.h
new file mode 100644
index 000000000..326cb1342
--- /dev/null
+++ b/spm/ivy/app/plat/arm/tc/include/sp_platform_def.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file contains common defines for a secure partition. The correct
+ * platform_def.h header file is selected according to the secure partition
+ * and platform being built using the make scripts.
+ */
+
+#ifndef SP_PLATFORM_DEF_H
+#define SP_PLATFORM_DEF_H
+
+#define PLAT_SP_RX_BASE ULL(0x7300000)
+
+#endif /* SP_PLATFORM_DEF_H */
diff --git a/spm/ivy/app/plat/arm/tc/platform.mk b/spm/ivy/app/plat/arm/tc/platform.mk
new file mode 100644
index 000000000..21057060c
--- /dev/null
+++ b/spm/ivy/app/plat/arm/tc/platform.mk
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TC_IVY_BASE = spm/ivy/app/plat/arm/tc
+
+PLAT_INCLUDES += -I${TC_IVY_BASE}/include/
+
+# Add the FDT source
+ifeq ($(IVY_SHIM),0)
+IVY_DTS = ${TC_IVY_BASE}/fdts/ivy-sel0.dts
+else
+IVY_DTS = ${TC_IVY_BASE}/fdts/ivy-sel1.dts
+endif
+
+# List of FDTS to copy
+FDTS_CP_LIST = $(IVY_DTS)
diff --git a/spm/ivy/ivy.dts b/spm/ivy/ivy.dts
deleted file mode 100644
index 4c5a11a10..000000000
--- a/spm/ivy/ivy.dts
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <sp_res_desc_def.h>
-
-#include "ivy_def.h"
-
-/* 4 KiB pages */
-#define PAGE_SIZE U(0x1000)
-
-/*
- * FVP platform layout. The defines are hardcoded here because including the
- * platform headers have too many dependencies.
- * TODO: Move this to the platform layer.
- */
-#define V2M_IOFPGA_BASE ULL(0x1c000000)
-#define V2M_IOFPGA_SIZE ULL(0x03000000)
-
-/* Aggregate of all devices in the first GB. */
-#define DEVICE0_BASE ULL(0x20000000)
-#define DEVICE0_SIZE ULL(0x0c200000)
-
-/dts-v1/;
-
-/ {
- compatible = "arm,sp_rd";
-
- attribute {
- version = <0x00000001>;
- sp_type = <RD_ATTR_TYPE_UP_MIGRATABLE>;
- pe_mpidr = <0>; /* Unused */
- runtime_el = <RD_ATTR_RUNTIME_SEL0>;
- exec_type = <RD_ATTR_RUNTIME>;
- panic_policy = <RD_ATTR_PANIC_ONESHOT>;
- xlat_granule = <RD_ATTR_XLAT_GRANULE_4KB>;
- binary_size = <IVY_IMAGE_SIZE>;
- load_address = <0x00000000 IVY_IMAGE_BASE>;
- entrypoint = <0x00000000 IVY_IMAGE_BASE>;
- };
-
- memory_regions {
- v2m_iofpga {
- str = "V2M IOFPGA";
- base = <0x00000000 V2M_IOFPGA_BASE>;
- size = <0x00000000 V2M_IOFPGA_SIZE>;
- attr = <RD_MEM_DEVICE>;
- };
-
- device0 {
- str = "Device 0";
- base = <0x00000000 DEVICE0_BASE>;
- size = <0x00000000 DEVICE0_SIZE>;
- attr = <RD_MEM_DEVICE>;
- };
-
- spm_buffer {
- str = "SPM buffer";
- base = <0x00000000 IVY_SPM_BUF_BASE>;
- size = <0x00000000 IVY_SPM_BUF_SIZE>;
- attr = <RD_MEM_NORMAL_SPM_SP_SHARED_MEM>;
- };
-
- ns_buffer {
- str = "NS buffer";
- base = <0x00000000 IVY_NS_BUF_BASE>;
- size = <0x00000000 IVY_NS_BUF_SIZE>;
- attr = <RD_MEM_NORMAL_CLIENT_SHARED_MEM>;
- };
- };
-
- notifications {
- notification_0 {
- attr = <0>;
- pe = <0>;
- };
- };
-
- services {
- test_service_1 {
- uuid = <IVY_SERVICE1_UUID_RD>;
-
- accessibility = <(RD_SERV_ACCESS_SECURE |
- RD_SERV_ACCESS_EL3 |
- RD_SERV_ACCESS_NORMAL)>;
- request_type = <(RD_SERV_SUPPORT_BLOCKING |
- RD_SERV_SUPPORT_NON_BLOCKING)>;
- connection_quota = <10>;
- sec_mem_size = <0>;
- interrupt_num = <0>;
- };
- };
-};
diff --git a/spm/ivy/ivy.h b/spm/ivy/ivy.h
deleted file mode 100644
index c5cac2e03..000000000
--- a/spm/ivy/ivy.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef IVY_H
-#define IVY_H
-
-#include <stdint.h>
-
-/* Linker symbols used to figure out the memory layout of Ivy. */
-extern uintptr_t __TEXT_START__, __TEXT_END__;
-#define IVY_TEXT_START ((uintptr_t)&__TEXT_START__)
-#define IVY_TEXT_END ((uintptr_t)&__TEXT_END__)
-
-extern uintptr_t __RODATA_START__, __RODATA_END__;
-#define IVY_RODATA_START ((uintptr_t)&__RODATA_START__)
-#define IVY_RODATA_END ((uintptr_t)&__RODATA_END__)
-
-extern uintptr_t __DATA_START__, __DATA_END__;
-#define IVY_DATA_START ((uintptr_t)&__DATA_START__)
-#define IVY_DATA_END ((uintptr_t)&__DATA_END__)
-
-extern uintptr_t __BSS_START__, __BSS_END__;
-#define IVY_BSS_START ((uintptr_t)&__BSS_START__)
-#define IVY_BSS_END ((uintptr_t)&__BSS_END__)
-
-#endif /* __IVY_H__ */
diff --git a/spm/ivy/ivy.ld.S b/spm/ivy/ivy.ld.S
index 634db1551..b21201bac 100644
--- a/spm/ivy/ivy.ld.S
+++ b/spm/ivy/ivy.ld.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,7 +10,13 @@
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+
+#if IVY_SHIM == 1
+ENTRY(shim_entrypoint)
+#else
ENTRY(ivy_entrypoint)
+#endif
+
SECTIONS
{
@@ -19,6 +25,48 @@ SECTIONS
ASSERT(. == ALIGN(PAGE_SIZE),
"TEXT_START address is not aligned to PAGE_SIZE.")
+#if IVY_SHIM == 1
+
+ .shim_text : {
+ __SHIM_TEXT_START__ = .;
+ *spm_shim_entrypoint.o(.text*)
+ *(.vectors)
+ . = NEXT(PAGE_SIZE);
+ __SHIM_TEXT_END__ = .;
+ }
+
+ .shim_rodata : {
+ . = ALIGN(PAGE_SIZE);
+ __SHIM_RODATA_START__ = .;
+
+ . = NEXT(PAGE_SIZE);
+ __SHIM_RODATA_END__ = .;
+ }
+
+ .shim_data : {
+ . = ALIGN(PAGE_SIZE);
+ __SHIM_DATA_START__ = .;
+
+ . = NEXT(PAGE_SIZE);
+ __SHIM_DATA_END__ = .;
+ }
+
+ .shim_bss (NOLOAD) : {
+ . = ALIGN(PAGE_SIZE);
+ __SHIM_BSS_START__ = .;
+
+ *(.bss.shim_stacks)
+ *(.bss.tf_base_xlat_table)
+ *(.bss.tf_mmap)
+ *xlat_tables_context.o(COMMON)
+ *xlat_tables_context.o(xlat_table)
+
+ . = NEXT(PAGE_SIZE);
+ __SHIM_BSS_END__ = .;
+ }
+
+#endif
+
.text : {
__TEXT_START__ = .;
*ivy_entrypoint.o(.text*)
@@ -32,6 +80,17 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__RODATA_START__ = .;
*(.rodata*)
+
+ /*
+ * Keep the .got section in the RO section as it is patched
+ * prior to enabling the MMU, so having it in RO is better for
+ * security. GOT is a table of addresses so ensure 8-byte alignment.
+ */
+ . = ALIGN(8);
+ __GOT_START__ = .;
+ *(.got)
+ __GOT_END__ = .;
+
. = NEXT(PAGE_SIZE);
__RODATA_END__ = .;
}
@@ -44,6 +103,18 @@ SECTIONS
__DATA_END__ = .;
}
+ /*
+ * .rela.dyn needs to come after .data for the read-elf utility
+ * to parse this section correctly. Ensure 8-byte alignment so
+ * that the fields of RELA data structure are aligned.
+ */
+ . = ALIGN(8);
+ __RELA_START__ = .;
+ .rela.dyn . : {
+ }
+ __RELA_END__ = .;
+
+
.bss (NOLOAD) : {
. = ALIGN(PAGE_SIZE);
__BSS_START__ = .;
diff --git a/spm/ivy/ivy.mk b/spm/ivy/ivy.mk
index a5000496b..2e6cbfa8b 100644
--- a/spm/ivy/ivy.mk
+++ b/spm/ivy/ivy.mk
@@ -1,51 +1,85 @@
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
include branch_protection.mk
-include lib/sprt/sprt_client.mk
+include lib/xlat_tables_v2/xlat_tables.mk
-IVY_DTB := $(BUILD_PLAT)/ivy.dtb
+# Include ivy platform Makefile
+IVY_PLAT_PATH := $(shell find spm/ivy/app/plat -wholename '*/${PLAT}')
+ifneq (${IVY_PLAT_PATH},)
+ include ${IVY_PLAT_PATH}/platform.mk
+endif
+
+IVY_SHIM := 1
+
+ifeq (${IVY_SHIM},1)
+ IVY_DTB := $(BUILD_PLAT)/ivy-sel1.dtb
+ SECURE_PARTITIONS += ivy_shim
+else
+ IVY_DTB := $(BUILD_PLAT)/ivy-sel0.dtb
+ SECURE_PARTITIONS += ivy
+endif
IVY_INCLUDES := \
+ -Itftf/framework/include \
-Iinclude \
-Iinclude/common \
-Iinclude/common/${ARCH} \
-Iinclude/lib \
-Iinclude/lib/${ARCH} \
- -Iinclude/lib/sprt \
-Iinclude/lib/utils \
-Iinclude/lib/xlat_tables \
+ -Iinclude/plat/common \
-Iinclude/runtime_services \
-Iinclude/runtime_services/secure_el0_payloads \
- -Ispm/ivy \
+ -Ispm/ivy/app \
+ -Ispm/ivy/shim \
-Ispm/common \
- ${SPRT_LIB_INCLUDES}
+ -Ispm/common/sp_tests/
IVY_SOURCES := \
- $(addprefix spm/ivy/, \
+ $(addprefix spm/ivy/app/, \
aarch64/ivy_entrypoint.S \
ivy_main.c \
) \
$(addprefix spm/common/, \
- aarch64/sp_arch_helpers.S \
+ sp_debug.c \
sp_helpers.c \
+ spm_helpers.c \
) \
+ $(addprefix spm/common/sp_tests/, \
+ sp_test_ffa.c \
+ )
-# TODO: Remove dependency on TFTF files.
+ifeq ($(IVY_SHIM),1)
IVY_SOURCES += \
- tftf/framework/debug.c \
- tftf/framework/${ARCH}/asm_debug.S
+ $(addprefix spm/ivy/shim/, \
+ aarch64/spm_shim_entrypoint.S \
+ aarch64/spm_shim_exceptions.S \
+ shim_main.c \
+ )
+endif
+
+# TODO: Remove dependency on TFTF files.
+IVY_SOURCES += \
+ tftf/framework/debug.c \
+ tftf/framework/${ARCH}/asm_debug.S \
+ tftf/tests/runtime_services/secure_service/${ARCH}/ffa_arch_helpers.S \
+ tftf/tests/runtime_services/secure_service/ffa_helpers.c \
+ tftf/tests/runtime_services/secure_service/spm_common.c
IVY_SOURCES += drivers/arm/pl011/${ARCH}/pl011_console.S \
- drivers/console/console.c \
lib/${ARCH}/cache_helpers.S \
lib/${ARCH}/misc_helpers.S \
+ lib/smc/${ARCH}/asm_smc.S \
+ lib/smc/${ARCH}/smc.c \
+ lib/smc/${ARCH}/hvc.c \
lib/locks/${ARCH}/spinlock.S \
lib/utils/mp_printf.c \
- ${SPRT_LIB_SOURCES}
+ ${XLAT_TABLES_LIB_SRCS}
IVY_LINKERFILE := spm/ivy/ivy.ld.S
@@ -57,19 +91,34 @@ $(eval $(call add_define,IVY_DEFINES,DEBUG))
$(eval $(call add_define,IVY_DEFINES,ENABLE_ASSERTIONS))
$(eval $(call add_define,IVY_DEFINES,ENABLE_BTI))
$(eval $(call add_define,IVY_DEFINES,ENABLE_PAUTH))
-$(eval $(call add_define,IVY_DEFINES,FVP_CLUSTER_COUNT))
-$(eval $(call add_define,IVY_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
-$(eval $(call add_define,IVY_DEFINES,FVP_MAX_PE_PER_CPU))
$(eval $(call add_define,IVY_DEFINES,LOG_LEVEL))
$(eval $(call add_define,IVY_DEFINES,PLAT_${PLAT}))
+$(eval $(call add_define,IVY_DEFINES,IVY_SHIM))
$(IVY_DTB) : $(BUILD_PLAT)/ivy $(BUILD_PLAT)/ivy/ivy.elf
-$(IVY_DTB) : spm/ivy/ivy.dts
- @echo " DTBGEN spm/ivy/ivy.dts"
+$(IVY_DTB) : $(IVY_DTS)
+ @echo " DTBGEN $@"
${Q}tools/generate_dtb/generate_dtb.sh \
- ivy spm/ivy/ivy.dts $(BUILD_PLAT)
+ ivy ${IVY_DTS} $(BUILD_PLAT) $(IVY_DTB)
@echo
@echo "Built $@ successfully"
@echo
-ivy: $(IVY_DTB)
+ivy: $(IVY_DTB) SP_LAYOUT
+
+# FDTS_CP copies flattened device tree sources
+# $(1) = output directory
+# $(2) = flattened device tree source file to copy
+define FDTS_CP
+ $(eval FDTS := $(addprefix $(1)/,$(notdir $(2))))
+FDTS_LIST += $(FDTS)
+$(FDTS): $(2) $(IVY_DTB)
+ @echo " CP $$<"
+ ${Q}cp $$< $$@
+endef
+
+ifdef FDTS_CP_LIST
+ $(eval files := $(filter %.dts,$(FDTS_CP_LIST)))
+ $(eval $(foreach file,$(files),$(call FDTS_CP,$(BUILD_PLAT),$(file))))
+ivy: $(FDTS_LIST)
+endif
diff --git a/spm/ivy/ivy_main.c b/spm/ivy/ivy_main.c
deleted file mode 100644
index 8542150ba..000000000
--- a/spm/ivy/ivy_main.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <debug.h>
-#include <drivers/console.h>
-#include <drivers/arm/pl011.h>
-#include <errno.h>
-#include <ivy_def.h>
-#include <plat_arm.h>
-#include <platform_def.h>
-#include <sp_helpers.h>
-#include <sprt_client.h>
-#include <sprt_svc.h>
-
-#include "ivy.h"
-#include "ivy_def.h"
-
-/* Host machine information injected by the build system in the ELF file. */
-extern const char build_message[];
-extern const char version_string[];
-
-static void ivy_print_memory_layout(void)
-{
- NOTICE("Secure Partition memory layout:\n");
-
- NOTICE(" Image regions\n");
- NOTICE(" Text region : %p - %p\n",
- (void *)IVY_TEXT_START, (void *)IVY_TEXT_END);
- NOTICE(" Read-only data region : %p - %p\n",
- (void *)IVY_RODATA_START, (void *)IVY_RODATA_END);
- NOTICE(" Data region : %p - %p\n",
- (void *)IVY_DATA_START, (void *)IVY_DATA_END);
- NOTICE(" BSS region : %p - %p\n",
- (void *)IVY_BSS_START, (void *)IVY_BSS_END);
- NOTICE(" Total image memory : %p - %p\n",
- (void *)IVY_IMAGE_BASE,
- (void *)(IVY_IMAGE_BASE + IVY_IMAGE_SIZE));
- NOTICE(" SPM regions\n");
- NOTICE(" SPM <-> SP buffer : %p - %p\n",
- (void *)IVY_SPM_BUF_BASE,
- (void *)(IVY_SPM_BUF_BASE + IVY_SPM_BUF_SIZE));
- NOTICE(" NS <-> SP buffer : %p - %p\n",
- (void *)IVY_NS_BUF_BASE,
- (void *)(IVY_NS_BUF_BASE + IVY_NS_BUF_SIZE));
-}
-
-void ivy_message_handler(struct sprt_queue_entry_message *message)
-{
- u_register_t ret0 = 0U, ret1 = 0U, ret2 = 0U, ret3 = 0U;
-
- if (message->type == SPRT_MSG_TYPE_SERVICE_REQUEST) {
- switch (message->args[1]) {
-
- case IVY_PRINT_MAGIC:
- INFO("IVY: Magic: 0x%x\n", IVY_MAGIC_NUMBER);
- ret0 = SPRT_SUCCESS;
- break;
-
- case IVY_GET_MAGIC:
- ret1 = IVY_MAGIC_NUMBER;
- ret0 = SPRT_SUCCESS;
- break;
-
- case IVY_SLEEP_MS:
- sp_sleep(message->args[2]);
- ret0 = SPRT_SUCCESS;
- break;
-
- default:
- NOTICE("IVY: Unhandled Service ID 0x%x\n",
- (unsigned int)message->args[1]);
- ret0 = SPRT_NOT_SUPPORTED;
- break;
- }
- } else {
- NOTICE("Ivy: Unhandled Service type 0x%x\n",
- (unsigned int)message->type);
- ret0 = SPRT_NOT_SUPPORTED;
- }
-
-
- sprt_message_end(message, ret0, ret1, ret2, ret3);
-}
-
-void __dead2 ivy_main(void)
-{
- console_init(PL011_UART3_BASE,
- PL011_UART3_CLK_IN_HZ,
- PL011_BAUDRATE);
-
- NOTICE("Booting test Secure Partition Ivy\n");
- NOTICE("%s\n", build_message);
- NOTICE("%s\n", version_string);
- NOTICE("Running at S-EL0\n");
-
- ivy_print_memory_layout();
-
- /*
- * Handle secure service requests.
- */
- sprt_initialize_queues((void *)IVY_SPM_BUF_BASE);
-
- while (1) {
- struct sprt_queue_entry_message message;
-
- /*
- * Try to fetch a message from the blocking requests queue. If
- * it is empty, try to fetch from the non-blocking requests
- * queue. Repeat until both of them are empty.
- */
- while (1) {
- int err = sprt_get_next_message(&message,
- SPRT_QUEUE_NUM_BLOCKING);
- if (err == -ENOENT) {
- err = sprt_get_next_message(&message,
- SPRT_QUEUE_NUM_NON_BLOCKING);
- if (err == -ENOENT) {
- break;
- } else {
- assert(err == 0);
- ivy_message_handler(&message);
- }
- } else {
- assert(err == 0);
- ivy_message_handler(&message);
- }
- }
-
- sprt_wait_for_messages();
- }
-}
diff --git a/spm/ivy/shim/aarch64/spm_shim_entrypoint.S b/spm/ivy/shim/aarch64/spm_shim_entrypoint.S
new file mode 100644
index 000000000..55d8dd8a8
--- /dev/null
+++ b/spm/ivy/shim/aarch64/spm_shim_entrypoint.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <ivy_def.h>
+#include <platform_def.h>
+
+ .globl shim_entrypoint
+
+.section .bss.shim_stacks
+ .balign CACHE_WRITEBACK_GRANULE
+ .fill IVY_STACKS_SIZE
+shim_stacks_end:
+
+func shim_entrypoint
+
+ /* Setup the stack pointer. */
+ adr x0, shim_stacks_end
+ mov sp, x0
+
+ /* Setup vector base address */
+ adr x0, spm_shim_exceptions_ptr
+ msr vbar_el1, x0
+ isb
+
+ /*
+ * Invalidate the data cache for the shim and whole partition.
+ * This prevents re-use of stale data cache entries from prior
+ * bootloader stages.
+ */
+ adrp x0, __SHIM_TEXT_START__
+ adrp x1, __BSS_END__
+ sub x1, x1, x0
+ bl inv_dcache_range
+
+ /* Enable I-Cache */
+ mrs x0, sctlr_el1
+ orr x0, x0, #SCTLR_I_BIT
+ msr sctlr_el1, x0
+ isb
+
+ /* Relocate symbols */
+shim_pie_fixup:
+ ldr x0, =shim_pie_fixup
+ and x0, x0, #~(0x1000 - 1)
+ mov x1, #IVY_IMAGE_SIZE
+ add x1, x1, x0
+ bl fixup_gdt_reloc
+
+ /* Clear S-EL1 shim BSS */
+ adrp x0, __SHIM_BSS_START__
+ adrp x2, __SHIM_BSS_END__
+ sub x2, x2, x0
+ mov x1, xzr
+ bl memset
+
+ /* Clear S-EL0 partition BSS */
+ adrp x0, __BSS_START__
+ adrp x2, __BSS_END__
+ sub x2, x2, x0
+ mov x1, xzr
+ bl memset
+
+ /* And jump to the C entrypoint. */
+ bl shim_main
+
+ /* Exception return to S-EL0 Ivy application code */
+ adrp x0, ivy_entrypoint
+ msr elr_el1, x0
+
+ /* AArch64 EL0t */
+ mov x0, #((DAIF_FIQ_BIT | DAIF_IRQ_BIT) << SPSR_DAIF_SHIFT)
+ msr spsr_el1, x0
+
+ /* TODO: clear GP/SIMD registers */
+ /* TODO: tune EL0 system registers */
+
+ eret
+
+endfunc shim_entrypoint
diff --git a/spm/ivy/shim/aarch64/spm_shim_exceptions.S b/spm/ivy/shim/aarch64/spm_shim_exceptions.S
new file mode 100644
index 000000000..07527e6e1
--- /dev/null
+++ b/spm/ivy/shim/aarch64/spm_shim_exceptions.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+
+/* -----------------------------------------------------------------------------
+ * Very simple stackless exception handlers used by the spm shim layer.
+ * -----------------------------------------------------------------------------
+ */
+ .globl spm_shim_exceptions_ptr
+
+vector_base spm_shim_exceptions_ptr
+
+ /* -----------------------------------------------------
+ * Current EL with SP0 : 0x0 - 0x200
+ * -----------------------------------------------------
+ */
+vector_entry_spin sync_exception_sp_el0
+
+vector_entry_spin irq_sp_el0
+
+vector_entry_spin fiq_sp_el0
+
+vector_entry_spin serror_sp_el0
+
+ /* -----------------------------------------------------
+ * Current EL with SPx: 0x200 - 0x400
+ * -----------------------------------------------------
+ */
+vector_entry_spin sync_exception_sp_elx
+
+vector_entry_spin irq_sp_elx
+
+vector_entry_spin fiq_sp_elx
+
+vector_entry_spin serror_sp_elx
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch64 : 0x400 - 0x600. No exceptions
+ * are handled since secure_partition does not implement
+ * a lower EL
+ * -----------------------------------------------------
+ */
+vector_entry sync_exception_aarch64
+ msr tpidr_el1, x30
+ mrs x30, esr_el1
+ ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+ cmp x30, #EC_AARCH64_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH32_SVC
+ b.eq do_smc
+
+ cmp x30, #EC_AARCH64_SYS
+ b.eq handle_sys_trap
+
+ /* Fail in all the other cases */
+ b panic
+
+ /* ---------------------------------------------
+ * Tell SPM that we are done initialising
+ * ---------------------------------------------
+ */
+do_smc:
+ mrs x30, tpidr_el1
+ smc #0
+ eret
+
+ /* AArch64 system instructions trap are handled as a panic for now */
+handle_sys_trap:
+panic:
+ b panic
+end_vector_entry sync_exception_aarch64
+
+vector_entry_spin irq_aarch64
+
+vector_entry_spin fiq_aarch64
+
+vector_entry_spin serror_aarch64
+
+ /* -----------------------------------------------------
+ * Lower EL using AArch32 : 0x600 - 0x800
+ * -----------------------------------------------------
+ */
+vector_entry_spin sync_exception_aarch32
+
+vector_entry_spin irq_aarch32
+
+vector_entry_spin fiq_aarch32
+
+vector_entry_spin serror_aarch32
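
The sync_exception_aarch64 entry above implements a small dispatch policy: SVCs coming from
the S-EL0 payload are forwarded onwards as SMCs, while trapped system-register accesses (and
anything unrecognised) spin in panic. A C view of the same policy, as a sketch only (the
helper functions are hypothetical and exist purely for illustration):

    #include <stdnoreturn.h>

    /* Hypothetical helpers, for illustration only. */
    extern void forward_svc_as_smc(void);
    extern noreturn void shim_panic(void);

    /* C view of the dispatch performed in sync_exception_aarch64. */
    void shim_handle_sync_from_sel0(unsigned int esr_ec)
    {
    	switch (esr_ec) {
    	case 0x15:	/* EC_AARCH64_SVC */
    	case 0x11:	/* EC_AARCH32_SVC */
    		forward_svc_as_smc();
    		break;
    	case 0x18:	/* EC_AARCH64_SYS: trapped MSR/MRS/system instruction */
    	default:
    		shim_panic();
    	}
    }
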
diff --git a/spm/ivy/shim/shim_main.c b/spm/ivy/shim/shim_main.c
new file mode 100644
index 000000000..f6170a0f8
--- /dev/null
+++ b/spm/ivy/shim/shim_main.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <drivers/arm/pl011.h>
+#include <drivers/console.h>
+#include <errno.h>
+#include <ffa_helpers.h>
+#include <lib/aarch64/arch_helpers.h>
+#include <lib/xlat_tables/xlat_mmu_helpers.h>
+#include <lib/xlat_tables/xlat_tables_v2.h>
+#include <plat_arm.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+#include <sp_debug.h>
+#include <sp_helpers.h>
+#include <std_svc.h>
+
+#include "ivy.h"
+#include "ivy_def.h"
+
+static void shim_print_memory_layout(void)
+{
+ INFO("Secure Partition memory layout:\n");
+
+ INFO(" Image regions\n");
+ INFO(" Text region : %p - %p\n",
+ (void *)IVY_TEXT_START, (void *)IVY_TEXT_END);
+ INFO(" Read-only data region : %p - %p\n",
+ (void *)IVY_RODATA_START, (void *)IVY_RODATA_END);
+ INFO(" Data region : %p - %p\n",
+ (void *)IVY_DATA_START, (void *)IVY_DATA_END);
+ INFO(" BSS region : %p - %p\n",
+ (void *)IVY_BSS_START, (void *)IVY_BSS_END);
+ INFO(" Total image memory : %p - %p\n",
+ (void *)IVY_IMAGE_BASE,
+ (void *)(IVY_IMAGE_BASE + IVY_IMAGE_SIZE));
+ INFO(" SPM regions\n");
+ INFO(" SPM <-> SP buffer : %p - %p\n",
+ (void *)IVY_SPM_BUF_BASE,
+ (void *)(IVY_SPM_BUF_BASE + IVY_SPM_BUF_SIZE));
+ INFO(" NS <-> SP buffer : %p - %p\n",
+ (void *)IVY_NS_BUF_BASE,
+ (void *)(IVY_NS_BUF_BASE + IVY_NS_BUF_SIZE));
+}
+
+static void shim_plat_configure_mmu(void)
+{
+ mmap_add_region(SHIM_TEXT_START,
+ SHIM_TEXT_START,
+ SHIM_TEXT_END - SHIM_TEXT_START,
+ MT_CODE | MT_PRIVILEGED);
+ mmap_add_region(SHIM_RODATA_START,
+ SHIM_RODATA_START,
+ SHIM_RODATA_END - SHIM_RODATA_START,
+ MT_RO_DATA | MT_PRIVILEGED);
+ mmap_add_region(SHIM_DATA_START,
+ SHIM_DATA_START,
+ SHIM_DATA_END - SHIM_DATA_START,
+ MT_RW_DATA | MT_PRIVILEGED);
+ mmap_add_region(SHIM_BSS_START,
+ SHIM_BSS_START,
+ SHIM_BSS_END - SHIM_BSS_START,
+ MT_RW_DATA | MT_PRIVILEGED);
+ mmap_add_region(IVY_TEXT_START,
+ IVY_TEXT_START,
+ IVY_TEXT_END - IVY_TEXT_START,
+ MT_CODE | MT_USER);
+ mmap_add_region(IVY_RODATA_START,
+ IVY_RODATA_START,
+ IVY_RODATA_END - IVY_RODATA_START,
+ MT_RO_DATA | MT_USER);
+ mmap_add_region(IVY_DATA_START,
+ IVY_DATA_START,
+ IVY_DATA_END - IVY_DATA_START,
+ MT_RW_DATA | MT_USER);
+ mmap_add_region(IVY_BSS_START,
+ IVY_BSS_START,
+ IVY_BSS_END - IVY_BSS_START,
+ MT_RW_DATA | MT_USER);
+
+ init_xlat_tables();
+}
+
+int shim_main(void)
+{
+ assert(IS_IN_EL1() != 0);
+
+ /* Initialise console */
+ set_putc_impl(FFA_HVC_CALL_AS_STDOUT);
+
+ /* Configure and enable Stage-1 MMU, enable D-Cache */
+ shim_plat_configure_mmu();
+ enable_mmu_el1(0);
+
+ INFO("Booting S-EL1 Shim\n");
+
+ shim_print_memory_layout();
+
+ return 0;
+}
diff --git a/spm/quark/aarch64/quark_entrypoint.S b/spm/quark/aarch64/quark_entrypoint.S
deleted file mode 100644
index 9cbed976d..000000000
--- a/spm/quark/aarch64/quark_entrypoint.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-#include <quark_def.h>
-#include <platform_def.h>
-
- .globl quark_entrypoint
-
-.section .bss.stacks
- .balign CACHE_WRITEBACK_GRANULE
- .fill QUARK_STACKS_SIZE
-stacks_end:
-
-func quark_entrypoint
-
- /* Setup the stack pointer. */
- adr x0, stacks_end
- mov sp, x0
-
- /* And jump to the C entrypoint. */
- b quark_main
-
-endfunc quark_entrypoint
diff --git a/spm/quark/quark.dts b/spm/quark/quark.dts
deleted file mode 100644
index a8fb4a709..000000000
--- a/spm/quark/quark.dts
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <sp_res_desc_def.h>
-
-#include "quark_def.h"
-
-/* 4 KiB pages */
-#define PAGE_SIZE U(0x1000)
-
-/dts-v1/;
-
-/ {
- compatible = "arm,sp_rd";
-
- attribute {
- version = <0x00000001>;
- sp_type = <RD_ATTR_TYPE_UP_MIGRATABLE>;
- pe_mpidr = <0>; /* Unused */
- runtime_el = <RD_ATTR_RUNTIME_SEL0>;
- exec_type = <RD_ATTR_RUNTIME>;
- panic_policy = <RD_ATTR_PANIC_ONESHOT>;
- xlat_granule = <RD_ATTR_XLAT_GRANULE_4KB>;
- binary_size = <QUARK_IMAGE_SIZE>;
- load_address = <0x00000000 QUARK_IMAGE_BASE>;
- entrypoint = <0x00000000 QUARK_IMAGE_BASE>;
- };
-
- memory_regions {
- spm_buffer {
- str = "SPM buffer";
- base = <0x00000000 QUARK_SPM_BUF_BASE>;
- size = <0x00000000 QUARK_SPM_BUF_SIZE>;
- attr = <RD_MEM_NORMAL_SPM_SP_SHARED_MEM>;
- };
- };
-
- notifications {
- notification_0 {
- attr = <0>;
- pe = <0>;
- };
- };
-
- services {
- test_service_1 {
- uuid = <QUARK_SERVICE1_UUID_RD>;
-
- accessibility = <(RD_SERV_ACCESS_SECURE |
- RD_SERV_ACCESS_EL3 |
- RD_SERV_ACCESS_NORMAL)>;
- request_type = <(RD_SERV_SUPPORT_BLOCKING |
- RD_SERV_SUPPORT_NON_BLOCKING)>;
- connection_quota = <10>;
- sec_mem_size = <0>;
- interrupt_num = <0>;
- };
- };
-};
diff --git a/spm/quark/quark.h b/spm/quark/quark.h
deleted file mode 100644
index 2b4fd3964..000000000
--- a/spm/quark/quark.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef QUARK_H
-#define QUARK_H
-
-#include <stdint.h>
-
-/* Linker symbols used to figure out the memory layout of Quark. */
-extern uintptr_t __TEXT_START__, __TEXT_END__;
-#define QUARK_TEXT_START ((uintptr_t)&__TEXT_START__)
-#define QUARK_TEXT_END ((uintptr_t)&__TEXT_END__)
-
-extern uintptr_t __RODATA_START__, __RODATA_END__;
-#define QUARK_RODATA_START ((uintptr_t)&__RODATA_START__)
-#define QUARK_RODATA_END ((uintptr_t)&__RODATA_END__)
-
-extern uintptr_t __DATA_START__, __DATA_END__;
-#define QUARK_DATA_START ((uintptr_t)&__DATA_START__)
-#define QUARK_DATA_END ((uintptr_t)&__DATA_END__)
-
-extern uintptr_t __BSS_START__, __BSS_END__;
-#define QUARK_BSS_START ((uintptr_t)&__BSS_START__)
-#define QUARK_BSS_END ((uintptr_t)&__BSS_END__)
-
-#endif /* QUARK_H */
diff --git a/spm/quark/quark.ld.S b/spm/quark/quark.ld.S
deleted file mode 100644
index 8f51edb08..000000000
--- a/spm/quark/quark.ld.S
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <quark_def.h>
-#include <platform_def.h>
-#include <xlat_tables_defs.h>
-
-OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
-OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
-ENTRY(quark_entrypoint)
-
-SECTIONS
-{
- . = QUARK_IMAGE_BASE;
-
- ASSERT(. == ALIGN(PAGE_SIZE),
- "TEXT_START address is not aligned to PAGE_SIZE.")
-
- .text : {
- __TEXT_START__ = .;
- *quark_entrypoint.o(.text*)
- *(.text*)
- *(.vectors)
- . = NEXT(PAGE_SIZE);
- __TEXT_END__ = .;
- }
-
- .rodata : {
- . = ALIGN(PAGE_SIZE);
- __RODATA_START__ = .;
- *(.rodata*)
- . = NEXT(PAGE_SIZE);
- __RODATA_END__ = .;
- }
-
- .data : {
- . = ALIGN(PAGE_SIZE);
- __DATA_START__ = .;
- *(.data*)
- . = NEXT(PAGE_SIZE);
- __DATA_END__ = .;
- }
-
- .bss (NOLOAD) : {
- . = ALIGN(PAGE_SIZE);
- __BSS_START__ = .;
- *(SORT_BY_ALIGNMENT(.bss*))
- *(COMMON)
- . = NEXT(PAGE_SIZE);
- __BSS_END__ = .;
- }
-}
diff --git a/spm/quark/quark.mk b/spm/quark/quark.mk
deleted file mode 100644
index 0fe1646a5..000000000
--- a/spm/quark/quark.mk
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-include branch_protection.mk
-include lib/sprt/sprt_client.mk
-
-QUARK_DTB := $(BUILD_PLAT)/quark.dtb
-
-QUARK_INCLUDES := \
- -Iinclude \
- -Iinclude/common \
- -Iinclude/common/${ARCH} \
- -Iinclude/lib \
- -Iinclude/lib/${ARCH} \
- -Iinclude/lib/sprt \
- -Iinclude/lib/utils \
- -Iinclude/lib/xlat_tables \
- -Iinclude/runtime_services \
- -Iinclude/runtime_services/secure_el0_payloads \
- -Ispm/quark \
- -Ispm/common \
- ${SPRT_LIB_INCLUDES}
-
-QUARK_SOURCES := \
- $(addprefix spm/quark/, \
- aarch64/quark_entrypoint.S \
- quark_main.c \
- ) \
- $(addprefix spm/common/, \
- aarch64/sp_arch_helpers.S \
- sp_helpers.c \
- ) \
-
-# TODO: Remove dependency on TFTF files.
-QUARK_SOURCES += \
- tftf/framework/debug.c \
- tftf/framework/${ARCH}/asm_debug.S
-
-QUARK_SOURCES += drivers/console/${ARCH}/dummy_console.S \
- lib/locks/${ARCH}/spinlock.S \
- lib/utils/mp_printf.c \
- ${SPRT_LIB_SOURCES}
-
-QUARK_LINKERFILE := spm/quark/quark.ld.S
-
-QUARK_DEFINES :=
-
-$(eval $(call add_define,QUARK_DEFINES,ARM_ARCH_MAJOR))
-$(eval $(call add_define,QUARK_DEFINES,ARM_ARCH_MINOR))
-$(eval $(call add_define,QUARK_DEFINES,DEBUG))
-$(eval $(call add_define,QUARK_DEFINES,ENABLE_ASSERTIONS))
-$(eval $(call add_define,QUARK_DEFINES,ENABLE_BTI))
-$(eval $(call add_define,QUARK_DEFINES,ENABLE_PAUTH))
-$(eval $(call add_define,QUARK_DEFINES,FVP_CLUSTER_COUNT))
-$(eval $(call add_define,QUARK_DEFINES,FVP_MAX_CPUS_PER_CLUSTER))
-$(eval $(call add_define,QUARK_DEFINES,FVP_MAX_PE_PER_CPU))
-$(eval $(call add_define,QUARK_DEFINES,PLAT_${PLAT}))
-
-$(QUARK_DTB) : $(BUILD_PLAT)/quark $(BUILD_PLAT)/quark/quark.elf
-$(QUARK_DTB) : spm/quark/quark.dts
- @echo " DTBGEN spm/quark/quark.dts"
- ${Q}tools/generate_dtb/generate_dtb.sh \
- quark spm/quark/quark.dts $(BUILD_PLAT)
- @echo
- @echo "Built $@ successfully"
- @echo
-
-quark: $(QUARK_DTB)
diff --git a/spm/quark/quark_def.h b/spm/quark/quark_def.h
deleted file mode 100644
index 7a0ec15f8..000000000
--- a/spm/quark/quark_def.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef QUARK_DEF_H
-#define QUARK_DEF_H
-
-#include <utils_def.h>
-
-/*
- * Layout of the Secure Partition image.
- */
-
-/* The base address is 0 to reduce the address space size */
-#define QUARK_IMAGE_BASE ULL(0x00000000)
-#define QUARK_IMAGE_SIZE ULL(0x10000)
-
-/* Memory reserved for stacks */
-#define QUARK_STACKS_SIZE ULL(0x1000)
-
-/* Memory shared between EL3 and S-EL0 (64 KiB). */
-#define QUARK_SPM_BUF_BASE (QUARK_IMAGE_BASE + QUARK_IMAGE_SIZE)
-#define QUARK_SPM_BUF_SIZE ULL(0x10000)
-
-/*
- * UUIDs of Secure Services provided by Quark
- */
-
-/* Mass (MeV/c^2): Up, down, charm, strange */
-#define QUARK_SERVICE1_UUID U(0x23), U(0x48), U(0x1275), U(0x95)
-
-#define QUARK_SERVICE1_UUID_RD U(0x23) U(0x48) U(0x1275) U(0x95)
-
-/*
- * Service IDs
- */
-/* Return a magic number unique to QUARK */
-#define QUARK_GET_MAGIC U(2002)
-
-/* Mass (MeV/c^2): Top */
-#define QUARK_MAGIC_NUMBER U(0x173210)
-
-#endif /* QUARK_DEF_H */
diff --git a/spm/quark/quark_main.c b/spm/quark/quark_main.c
deleted file mode 100644
index 145560bf0..000000000
--- a/spm/quark/quark_main.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <assert.h>
-#include <cdefs.h>
-#include <errno.h>
-#include <quark_def.h>
-#include <sprt_client.h>
-#include <sprt_svc.h>
-#include <utils_def.h>
-
-#include "quark.h"
-#include "quark_def.h"
-
-/* NOTE: This partition doesn't have text output capabilities */
-
-static void quark_message_handler(struct sprt_queue_entry_message *message)
-{
- u_register_t ret0 = 0U, ret1 = 0U, ret2 = 0U, ret3 = 0U;
-
- if (message->type == SPRT_MSG_TYPE_SERVICE_REQUEST) {
- switch (message->args[1]) {
-
- case QUARK_GET_MAGIC:
- ret1 = QUARK_MAGIC_NUMBER;
- ret0 = SPRT_SUCCESS;
- break;
-
- default:
- ret0 = SPRT_NOT_SUPPORTED;
- break;
- }
- } else {
- ret0 = SPRT_NOT_SUPPORTED;
- }
-
- sprt_message_end(message, ret0, ret1, ret2, ret3);
-}
-
-void __dead2 quark_main(void)
-{
- /*
- * Handle secure service requests.
- */
- sprt_initialize_queues((void *)QUARK_SPM_BUF_BASE);
-
- while (1) {
- struct sprt_queue_entry_message message;
-
- /*
- * Try to fetch a message from the blocking requests queue. If
- * it is empty, try to fetch from the non-blocking requests
- * queue. Repeat until both of them are empty.
- */
- while (1) {
- int err = sprt_get_next_message(&message,
- SPRT_QUEUE_NUM_BLOCKING);
- if (err == -ENOENT) {
- err = sprt_get_next_message(&message,
- SPRT_QUEUE_NUM_NON_BLOCKING);
- if (err == -ENOENT) {
- break;
- } else {
- assert(err == 0);
- quark_message_handler(&message);
- }
- } else {
- assert(err == 0);
- quark_message_handler(&message);
- }
- }
-
- sprt_wait_for_messages();
- }
-}
diff --git a/tftf/framework/aarch64/arch.c b/tftf/framework/aarch64/arch.c
index 56369ae80..f1223a1ee 100644
--- a/tftf/framework/aarch64/arch.c
+++ b/tftf/framework/aarch64/arch.c
@@ -5,7 +5,10 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <arch_features.h>
#include <arch_helpers.h>
+#include <tftf_lib.h>
void tftf_arch_setup(void)
{
@@ -23,11 +26,25 @@ void tftf_arch_setup(void)
write_hcr_el2(HCR_TGE_BIT);
/*
- * Disable trap of SVE instructions to EL2.
+ * Disable trap of SVE, SME instructions to EL2.
* The fields of the CPTR_EL2 register reset to an
* architecturally UNKNOWN value.
*/
- write_cptr_el2(CPTR_EL2_RES1);
+ write_cptr_el2(CPTR_EL2_RESET_VAL);
isb();
+
+ /*
+ * Enable access to ZT0 storage when FEAT_SME2 is implemented
+ * and enable FA64 when FEAT_SME_FA64 is implemented
+ */
+ if (is_feat_sme_supported()) {
+ write_smcr_el2(SMCR_EL2_RESET_VAL);
+ isb();
+ }
+
+ /* Clear SVE hint bit */
+ if (is_armv8_2_sve_present()) {
+ tftf_smc_set_sve_hint(false);
+ }
}
}
diff --git a/tftf/framework/aarch64/entrypoint.S b/tftf/framework/aarch64/entrypoint.S
index 6aa564562..7937b8804 100644
--- a/tftf/framework/aarch64/entrypoint.S
+++ b/tftf/framework/aarch64/entrypoint.S
@@ -18,11 +18,13 @@
*/
func tftf_entrypoint
/* --------------------------------------------------------------------
- * Save arguments x0, x1 from the previous Boot loader
+ * Save arguments x0-x3 from the previous bootloader.
* --------------------------------------------------------------------
*/
mov x20, x0
mov x21, x1
+ mov x22, x2
+ mov x23, x3
bl arch_init
@@ -79,13 +81,15 @@ func tftf_entrypoint
bl platform_set_stack
/* --------------------------------------------------------------------
- * Save fw_config and hw_config addresses passed in x0, x1 from the
- * previous boot loader
+ * Save the fw_config or transfer list and hw_config addresses passed
+ * in registers x0 to x3 from the previous bootloader.
* --------------------------------------------------------------------
*/
mov x0, x20
mov x1, x21
- bl save_fw_hw_configs
+ mov x2, x22
+ mov x3, x23
+ bl save_handoff_params
/* --------------------------------------------------------------------
* tftf_cold_boot_main() will perform the remaining architectural and
@@ -204,10 +208,22 @@ el1_setup:
ret
endfunc arch_init
-/* Set fw_config and hw_config addresses passed in x0, x1 */
-func save_fw_hw_configs
+
+/* ----------------------------------------------------------------------------
+ * Save fw_config or transfer list and hw_config addresses passed in registers
+ * x0 to x3 from the previous bootloader.
+ * ----------------------------------------------------------------------------
+ */
+func save_handoff_params
+#if TRANSFER_LIST
+ adrp x4, ns_tl
+ str x3, [x4, :lo12:ns_tl]
+ str x1, [x4, :lo12:tl_signature]
+ str x0, [x4, :lo12:hw_config_base]
+#else
adrp x2, fw_config_base
str x0, [x2, :lo12:fw_config_base]
str x1, [x2, :lo12:hw_config_base]
+#endif
ret
-endfunc save_fw_hw_configs
+endfunc save_handoff_params
diff --git a/tftf/framework/aarch64/exceptions.S b/tftf/framework/aarch64/exceptions.S
index 677b30fe9..471bef7fd 100644
--- a/tftf/framework/aarch64/exceptions.S
+++ b/tftf/framework/aarch64/exceptions.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -23,38 +23,42 @@ vector_base tftf_vector
/*
* Current EL with SP0 : 0x0 - 0x200.
*/
-unhandled_exception SynchronousExceptionSP0
-unhandled_exception IrqSP0
-unhandled_exception FiqSP0
-unhandled_exception SErrorSP0
+unhandled_exception sync_exception_sp_el0
+unhandled_exception irq_sp_el0
+unhandled_exception fiq_sp_el0
+unhandled_exception serror_sp_el0
/*
* Current EL with SPx : 0x200 - 0x400.
*/
-unhandled_exception SynchronousExceptionSPx
+vector_entry sync_spx
+ b sync_exception_vector_entry
+end_vector_entry sync_spx
-vector_entry IrqSPx
+vector_entry irq_sp_elx
b irq_vector_entry
-end_vector_entry IrqSPx
+end_vector_entry irq_sp_elx
-unhandled_exception FiqSPx
-unhandled_exception SErrorSPx
+unhandled_exception fiq_sp_elx
+vector_entry serror_sp_elx
+ b serror_vector_entry
+end_vector_entry serror_sp_elx
/*
* Lower EL using AArch64 : 0x400 - 0x600.
*/
-unhandled_exception SynchronousExceptionA64
-unhandled_exception IrqA64
-unhandled_exception FiqA64
-unhandled_exception SErrorA64
+unhandled_exception sync_exception_aarch64
+unhandled_exception irq_aarch64
+unhandled_exception fiq_aarch64
+unhandled_exception serror_aarch64
/*
* Lower EL using AArch32 : 0x600 - 0x800.
*/
-unhandled_exception SynchronousExceptionA32
-unhandled_exception IrqA32
-unhandled_exception FiqA32
-unhandled_exception SErrorA32
+unhandled_exception sync_exception_aarch32
+unhandled_exception irq_aarch32
+unhandled_exception fiq_aarch32
+unhandled_exception serror_aarch32
.macro save_gp_regs
stp x0, x1, [sp, #0x0]
@@ -95,6 +99,22 @@ unhandled_exception SErrorA32
ldp x0, x1, [sp, #0x0]
.endm
+func sync_exception_vector_entry
+ sub sp, sp, #0x100
+ save_gp_regs
+ mov x19, sp
+ bl tftf_sync_exception_handler
+ cbnz x0, 0f
+ mov x0, x19
+ /* Save original stack pointer value on the stack */
+ add x1, x0, #0x100
+ str x1, [x0, #0xf8]
+ b print_exception
+0: restore_gp_regs
+ add sp, sp, #0x100
+ eret
+endfunc sync_exception_vector_entry
+
func irq_vector_entry
sub sp, sp, #0x100
save_gp_regs
@@ -104,6 +124,21 @@ func irq_vector_entry
eret
endfunc irq_vector_entry
+func serror_vector_entry
+ sub sp, sp, #0x100
+ save_gp_regs
+ bl tftf_serror_handler
+ cbnz x0, 1f
+ mov x0, x19
+ /* Save original stack pointer value on the stack */
+ add x1, x0, #0x100
+ str x1, [x0, #0xf8]
+ b print_exception
+1: restore_gp_regs
+ add sp, sp, #0x100
+ eret
+endfunc serror_vector_entry
+
func crash_dump
/* Save general-purpose registers on the stack. */
sub sp, sp, #0x100
diff --git a/tftf/framework/framework.mk b/tftf/framework/framework.mk
index 8ec18ea9a..b25d1d029 100644
--- a/tftf/framework/framework.mk
+++ b/tftf/framework/framework.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -25,9 +25,10 @@ TFTF_INCLUDES := \
-Iinclude/runtime_services \
-Iinclude/runtime_services/secure_el0_payloads \
-Iinclude/runtime_services/secure_el1_payloads \
+ -Iinclude/runtime_services/host_realm_managment \
-Ispm/cactus \
-Ispm/ivy \
- -Ispm/quark \
+ -Irealm \
-Ismc_fuzz/include
FRAMEWORK_SOURCES := ${AUTOGEN_DIR}/tests_list.c
@@ -53,7 +54,7 @@ FRAMEWORK_SOURCES += \
lib/events/events.c \
lib/extensions/amu/${ARCH}/amu.c \
lib/extensions/amu/${ARCH}/amu_helpers.S \
- lib/irq/irq.c \
+ lib/exceptions/irq.c \
lib/locks/${ARCH}/spinlock.S \
lib/power_management/hotplug/hotplug.c \
lib/power_management/suspend/${ARCH}/asm_tftf_suspend.S \
@@ -63,6 +64,8 @@ FRAMEWORK_SOURCES += \
lib/smc/${ARCH}/asm_smc.S \
lib/smc/${ARCH}/smc.c \
lib/trng/trng.c \
+ lib/errata_abi/errata_abi.c \
+ lib/transfer_list/transfer_list.c \
lib/trusted_os/trusted_os.c \
lib/utils/mp_printf.c \
lib/utils/uuid.c \
@@ -79,8 +82,15 @@ FRAMEWORK_SOURCES += ${COMPILER_RT_SRCS}
ifeq (${ARCH},aarch64)
# ARMv8.3 Pointer Authentication support files
FRAMEWORK_SOURCES += \
+ lib/exceptions/aarch64/sync.c \
+ lib/exceptions/aarch64/serror.c \
lib/extensions/pauth/aarch64/pauth.c \
- lib/extensions/pauth/aarch64/pauth_helpers.S
+ lib/extensions/pauth/aarch64/pauth_helpers.S \
+ lib/extensions/sme/aarch64/sme.c \
+ lib/extensions/sme/aarch64/sme_helpers.S \
+ lib/extensions/sme/aarch64/sme2_helpers.S \
+ lib/extensions/sve/aarch64/sve.c \
+ lib/extensions/sve/aarch64/sve_helpers.S
endif
TFTF_LINKERFILE := tftf/framework/tftf.ld.S
diff --git a/tftf/framework/include/tftf.h b/tftf/framework/include/tftf.h
index 43f1e7ea5..b9bf705b9 100644
--- a/tftf/framework/include/tftf.h
+++ b/tftf/framework/include/tftf.h
@@ -15,7 +15,7 @@
#define TFTF_WELCOME_STR "Booting trusted firmware test framework"
/* Maximum size of test output (in bytes) */
-#define TESTCASE_OUTPUT_MAX_SIZE 512
+#define TESTCASE_OUTPUT_MAX_SIZE 1024
/* Size of build message used to differentiate different TFTF binaries */
#define BUILD_MESSAGE_SIZE 0x20
diff --git a/tftf/framework/main.c b/tftf/framework/main.c
index 2350b9622..0701e281b 100644
--- a/tftf/framework/main.c
+++ b/tftf/framework/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -24,6 +24,11 @@
#include <tftf.h>
#include <tftf_lib.h>
#include <timer.h>
+#if TRANSFER_LIST
+#include <transfer_list.h>
+#endif
+
+#define MIN_RETRY_TO_POWER_ON_LEAD_CPU 10
/* version information for TFTF */
extern const char version_string[];
@@ -42,7 +47,12 @@ static u_register_t cpu_on_ctx_id_arr[PLATFORM_CORE_COUNT];
static unsigned int test_is_rebooting;
/* Parameters arg0 and arg1 passed from BL31 */
+#if TRANSFER_LIST
+u_register_t ns_tl;
+u_register_t tl_signature;
+#else
u_register_t fw_config_base;
+#endif
u_register_t hw_config_base;
static inline const test_suite_t *current_testsuite(void)
@@ -309,6 +319,7 @@ static unsigned int close_test(void)
static void __dead2 hand_over_to_lead_cpu(void)
{
int ret;
+ unsigned int tftf_cpu_pwr_on_ctr = 0U;
unsigned int mpid = read_mpidr_el1() & MPID_MASK;
unsigned int core_pos = platform_get_core_pos(mpid);
@@ -321,8 +332,24 @@ static void __dead2 hand_over_to_lead_cpu(void)
* doesn't matter because it will be overwritten by prepare_next_test().
* Pass a NULL pointer to easily catch the problem in case something
* goes wrong.
+ *
+	 * In CI, on a four-world system (Normal, Secure, Root and Realm), the
+	 * framework occasionally fails to power on the CPU for the next test
+	 * and gets error code -4 (ALREADY_ON). This happens because the lead
+	 * CPU is still powering down from EL3's point of view, which is not
+	 * visible to EL2. Retrying in a loop with a small delay between
+	 * attempts resolves this.
*/
- ret = tftf_cpu_on(lead_cpu_mpid, 0, 0);
+ while (tftf_cpu_pwr_on_ctr < MIN_RETRY_TO_POWER_ON_LEAD_CPU) {
+ ret = tftf_cpu_on(lead_cpu_mpid, 0, 0);
+ if (ret == PSCI_E_SUCCESS) {
+ break;
+ } else {
+ tftf_cpu_pwr_on_ctr += 1;
+ waitms(1);
+ }
+ }
+
if (ret != PSCI_E_SUCCESS) {
ERROR("CPU%u: Failed to power on lead CPU%u (%d)\n",
core_pos, platform_get_core_pos(lead_cpu_mpid), ret);
@@ -531,7 +558,7 @@ void __dead2 tftf_cold_boot_main(void)
* authentication would fail then.
*/
#if ENABLE_PAUTH
- assert(is_armv8_3_pauth_apa_api_present());
+ assert(is_armv8_3_pauth_apa_api_apa3_present());
/*
* Program APIAKey_EL1 key and enable ARMv8.3-PAuth here as this
diff --git a/tftf/framework/tftf.ld.S b/tftf/framework/tftf.ld.S
index 9374206f1..3621d9717 100644
--- a/tftf/framework/tftf.ld.S
+++ b/tftf/framework/tftf.ld.S
@@ -1,18 +1,20 @@
/*
- * Copyright (c) 2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <platform_def.h>
#include <xlat_tables_defs.h>
+#include <host_realm_mem_layout.h>
+
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(tftf_entrypoint)
MEMORY {
- RAM (rwx): ORIGIN = DRAM_BASE, LENGTH = DRAM_SIZE
+ RAM (rwx): ORIGIN = TFTF_BASE, LENGTH = DRAM_SIZE
}
@@ -45,10 +47,30 @@ SECTIONS
.data : {
__DATA_START__ = .;
*(.data*)
+ . = NEXT(PAGE_SIZE); /* This ensures tftf.bin is aligned to page size. */
__DATA_END__ = .;
} >RAM
- stacks (NOLOAD) : {
+ /* End of LOAD Sections. NOLOAD sections begin here. */
+ /*
+ * Memory for Realm Image has to follow next as it will appended to end
+	 * Memory for the Realm image has to come next, as it will be appended
+	 * to the end of tftf.bin.
+ realm_payload (NOLOAD) : {
+ __REALM_PAYLOAD_START__ = .;
+ . = __REALM_PAYLOAD_START__ + REALM_MAX_LOAD_IMG_SIZE;
+ __REALM_PAYLOAD_END__ = .;
+ } >RAM
+
+ /* Memory pool for Realm payload tests. */
+ realm_pool (NOLOAD) : ALIGN(PAGE_SIZE) {
+ __REALM_POOL_START__ = .;
+ . = __REALM_POOL_START__ + (NS_REALM_SHARED_MEM_SIZE * MAX_REALM_COUNT) +
+ (PAGE_POOL_MAX_SIZE * MAX_REALM_COUNT);
+ __REALM_POOL_END__ = .;
+ } >RAM
+
+ stacks (NOLOAD) : ALIGN(16) {
__STACKS_START__ = .;
*(tftf_normal_stacks)
__STACKS_END__ = .;
@@ -56,9 +78,9 @@ SECTIONS
/*
* The .bss section gets initialised to 0 at runtime.
- * Its base address must be 16-byte aligned.
+ * Its base address is always PAGE_SIZE aligned.
*/
- .bss : ALIGN(16) {
+ .bss : {
__BSS_START__ = .;
*(SORT_BY_ALIGNMENT(.bss*))
*(COMMON)
@@ -67,10 +89,9 @@ SECTIONS
/*
* The xlat_table section is for full, aligned page tables (4K).
- * Removing them from .bss avoids forcing 4K alignment on
- * the .bss section and eliminates the unecessary zero init
+	 * Removing them from .bss eliminates the unnecessary zero init.
*/
- xlat_table (NOLOAD) : {
+ xlat_table (NOLOAD) : ALIGN(PAGE_SIZE) {
*(xlat_table)
} >RAM
@@ -105,7 +126,6 @@ SECTIONS
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
-
__TFTF_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
diff --git a/tftf/tests/aarch32_tests_to_skip.txt b/tftf/tests/aarch32_tests_to_skip.txt
new file mode 100644
index 000000000..210d46536
--- /dev/null
+++ b/tftf/tests/aarch32_tests_to_skip.txt
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+Realm payload at EL1
+SIMD,SVE Registers context
+Invalid memory access with RME extension
+FF-A Setup and Discovery
+SP exceptions
+FF-A Direct messaging
+FF-A Group0 interrupts
+FF-A Power management
+FF-A Memory Sharing
+FF-A Interrupt
+SMMUv3 tests
+FF-A Notifications
+RMI and SPM tests
+FF-A SMCCC compliance
diff --git a/tftf/tests/common/test_helpers.c b/tftf/tests/common/test_helpers.c
index d794bebc7..6a0b08bd0 100644
--- a/tftf/tests/common/test_helpers.c
+++ b/tftf/tests/common/test_helpers.c
@@ -1,18 +1,17 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <stdlib.h>
+
#include <arch_helpers.h>
#include <plat_topology.h>
#include <platform.h>
-#include <power_management.h>
#include <test_helpers.h>
#include <tftf_lib.h>
-static struct mailbox_buffers test_mb = {.send = NULL, .recv = NULL};
-
int is_sys_suspend_state_ready(void)
{
int aff_info;
@@ -131,48 +130,33 @@ test_result_t map_test_unmap(const map_args_unmap_t *args,
return test_ret;
}
-void set_tftf_mailbox(const struct mailbox_buffers *mb)
+/*
+ * Utility function to wait for all CPUs other than the caller to be
+ * OFF.
+ */
+void wait_for_non_lead_cpus(void)
{
- if (mb != NULL) {
- test_mb = *mb;
- }
-}
+ unsigned int target_mpid, target_node;
-bool get_tftf_mailbox(struct mailbox_buffers *mb)
-{
- if ((test_mb.recv != NULL) && (test_mb.send != NULL)) {
- *mb = test_mb;
- return true;
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ wait_for_core_to_turn_off(target_mpid);
}
- return false;
}
-test_result_t check_spmc_testing_set_up(
- uint32_t ffa_version_major, uint32_t ffa_version_minor,
- const struct ffa_uuid *ffa_uuids, size_t ffa_uuids_size)
+void wait_for_core_to_turn_off(unsigned int mpidr)
{
- struct mailbox_buffers mb;
+ /* Skip lead CPU, as it is powered on */
+ if (mpidr == (read_mpidr_el1() & MPID_MASK))
+ return;
- if (ffa_uuids == NULL) {
- ERROR("Invalid parameter ffa_uuids!\n");
- return TEST_RESULT_FAIL;
- }
-
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(ffa_version_major,
- ffa_version_minor);
-
- /**********************************************************************
- * If OP-TEE is SPMC skip the current test.
- **********************************************************************/
- if (check_spmc_execution_level()) {
- VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
- return TEST_RESULT_SKIPPED;
+ while (tftf_psci_affinity_info(mpidr, MPIDR_AFFLVL0) != PSCI_STATE_OFF) {
+ continue;
}
+}
- GET_TFTF_MAILBOX(mb);
-
- for (unsigned int i = 0U; i < ffa_uuids_size; i++)
- SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(*mb, ffa_uuids[i].uuid);
-
- return TEST_RESULT_SUCCESS;
+/* Generate 64-bit random number */
+unsigned long long rand64(void)
+{
+ return ((unsigned long long)rand() << 32) | rand();
}
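
The helpers added above (wait_for_non_lead_cpus(), wait_for_core_to_turn_off()) encapsulate the teardown pattern many multi-core tests repeat: boot the secondary CPUs, let them run a payload that powers them off again, then have the lead CPU poll PSCI affinity info until everything is OFF. A minimal sketch of that pattern follows; it assumes the usual TFTF power-management API (tftf_cpu_on(), PSCI_E_SUCCESS), and the test and payload names are illustrative only.

#include <arch_helpers.h>
#include <plat_topology.h>
#include <platform.h>
#include <power_management.h>
#include <test_helpers.h>
#include <tftf_lib.h>

/*
 * Hypothetical payload run by every secondary CPU; it simply returns, which
 * causes the TFTF framework to power the CPU off again.
 */
static test_result_t secondary_noop(void)
{
	return TEST_RESULT_SUCCESS;
}

test_result_t example_multicore_teardown(void)
{
	unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
	unsigned int cpu_node, mpid;

	/* Power on every CPU except the lead one. */
	for_each_cpu(cpu_node) {
		mpid = tftf_get_mpidr_from_node(cpu_node);
		if (mpid == lead_mpid) {
			continue;
		}
		if (tftf_cpu_on(mpid, (uintptr_t)secondary_noop, 0) != PSCI_E_SUCCESS) {
			return TEST_RESULT_FAIL;
		}
	}

	/* Block until all non-lead CPUs report PSCI_STATE_OFF again. */
	wait_for_non_lead_cpus();

	return TEST_RESULT_SUCCESS;
}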
diff --git a/tftf/tests/extensions/afp/test_afp.c b/tftf/tests/extensions/afp/test_afp.c
new file mode 100644
index 000000000..625d9cf48
--- /dev/null
+++ b/tftf/tests/extensions/afp/test_afp.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+test_result_t test_afp_support(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ test_result_t ret;
+ uint64_t saved_fpcr, fpcr;
+
+ SKIP_TEST_IF_AFP_NOT_SUPPORTED();
+
+ saved_fpcr = read_fpcr();
+ /* Write advanced floating point controlling bits */
+ write_fpcr(saved_fpcr | FPCR_FIZ_BIT | FPCR_AH_BIT | FPCR_NEP_BIT);
+
+ fpcr = read_fpcr();
+ /* Check if all bits got written successfully */
+ if ((fpcr | ~(FPCR_FIZ_BIT | FPCR_AH_BIT | FPCR_NEP_BIT)) == ~0ULL) {
+ ret = TEST_RESULT_SUCCESS;
+ } else {
+ ret = TEST_RESULT_FAIL;
+ }
+
+ write_fpcr(saved_fpcr);
+
+ return ret;
+#endif /* __aarch64__ */
+}
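
The success check in the hunk above relies on the identity (reg | ~mask) == ~0 being equivalent to (reg & mask) == mask. A minimal sketch of the same check in the more common form; the helper name is illustrative only.

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true when every bit of 'mask' is set in 'reg'; equivalent to the
 * (reg | ~mask) == ~0ULL form used by test_afp_support() above.
 */
static bool all_bits_set(uint64_t reg, uint64_t mask)
{
	return (reg & mask) == mask;
}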
diff --git a/tftf/tests/extensions/brbe/test_brbe.c b/tftf/tests/extensions/brbe/test_brbe.c
new file mode 100644
index 000000000..f2c244a32
--- /dev/null
+++ b/tftf/tests/extensions/brbe/test_brbe.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to branch record buffer control registers
+ * from NS world. Accessing these registers will trap to EL3 and crash when EL3
+ * has not properly enabled access to them.
+ */
+test_result_t test_brbe_enabled(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_BRBE_NOT_SUPPORTED();
+
+ read_brbcr_el1();
+ read_brbcr_el2();
+ read_brbfcr_el1();
+ read_brbts_el1();
+ read_brbinfinj_el1();
+ read_brbsrcinj_el1();
+ read_brbtgtinj_el1();
+ read_brbidr0_el1();
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/fgt/test_fgt.c b/tftf/tests/extensions/fgt/test_fgt.c
index 6213d4bf5..5d9600d72 100644
--- a/tftf/tests/extensions/fgt/test_fgt.c
+++ b/tftf/tests/extensions/fgt/test_fgt.c
@@ -10,6 +10,38 @@
#include <string.h>
#include <arch_helpers.h>
+#ifdef __aarch64__
+
+static bool is_init_val_set(u_register_t reg, u_register_t init_val,
+ u_register_t feat_mask)
+{
+ return (reg & feat_mask) == (init_val & feat_mask);
+}
+
+#define CHECK_FEAT_TRAP_INITIALIZED(_reg, _REG, _feat_check, _FEAT) \
+do { \
+ if (_feat_check() != 0) { \
+ if (is_init_val_set(_reg, _REG ## _INIT_VAL, \
+ _REG ## _ ## FEAT_ ## _FEAT ## _MASK) == 0) { \
+ return TEST_RESULT_FAIL; \
+ } \
+ } \
+} while (false);
+
+#define CHECK_FEAT_TRAP_INITIALIZED2(_reg, _REG, _feat_check, _FEAT, \
+ _feat2_check, _FEAT2, _op) \
+do { \
+ if ((_feat_check() != 0) _op (_feat2_check() != 0)) { \
+ if (is_init_val_set(_reg, _REG ## _INIT_VAL, _REG ## _ \
+ ## FEAT_ ## _FEAT ## _ ## _FEAT2 ## _MASK) \
+ == 0) { \
+ return TEST_RESULT_FAIL; \
+ } \
+ } \
+} while (false);
+
+#endif
+
/*
* TF-A is expected to allow access to ARMv8.6-FGT system registers from EL2.
* Reading these registers causes a trap to EL3 and crash when TF-A has not
@@ -21,12 +53,82 @@ test_result_t test_fgt_enabled(void)
#ifdef __aarch64__
SKIP_TEST_IF_FGT_NOT_SUPPORTED();
- read_hfgrtr_el2();
- read_hfgwtr_el2();
- read_hfgitr_el2();
+
+ u_register_t hfgitr_el2 = read_hfgitr_el2();
+ u_register_t hfgrtr_el2 = read_hfgrtr_el2();
+ u_register_t hfgwtr_el2 = read_hfgwtr_el2();
+
+ /*
+ * The following registers are not supposed to be consumed, but
+ * are read to test their presence when FEAT_FGT is supported.
+ */
read_hdfgrtr_el2();
read_hdfgwtr_el2();
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ get_feat_brbe_support, BRBE)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_specres_present, SPECRES)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbirange_present, TLBIRANGE)
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbirange_present, TLBIRANGE, \
+ is_feat_tlbios_present, TLBIOS, &&)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_tlbios_present, TLBIOS)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_armv8_2_pan2_present, PAN2)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgitr_el2, HFGITR_EL2, \
+ is_feat_dpb2_present, DPB2)
+ if (is_init_val_set(hfgitr_el2, HFGITR_EL2_INIT_VAL,
+ HFGITR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_sme_supported, SME)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_ls64_accdata_present, LS64_ACCDATA)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_ras_present, RAS)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_rasv1p1_present, RASV1P1)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_gicv3_gicv4_present, GICV3)
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_csv2_2_present, CSV2_2, \
+ is_feat_csv2_1p2_present, CSV2_1P2, ||)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_feat_lor_present, LOR)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgrtr_el2, HFGRTR_EL2, \
+ is_armv8_3_pauth_apa_api_apa3_present, PAUTH)
+ if (is_init_val_set(hfgrtr_el2, HFGRTR_EL2_INIT_VAL,
+ HFGRTR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_sme_supported, SME)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_ls64_accdata_present, LS64_ACCDATA);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_ras_present, RAS);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_rasv1p1_present, RASV1P1);
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_gicv3_gicv4_present, GICV3);
+ CHECK_FEAT_TRAP_INITIALIZED2(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_csv2_2_present, CSV2_2, \
+ is_feat_csv2_1p2_present, CSV2_1P2, ||)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_feat_lor_present, LOR)
+ CHECK_FEAT_TRAP_INITIALIZED(hfgwtr_el2, HFGWTR_EL2, \
+ is_armv8_3_pauth_apa_api_apa3_present, PAUTH)
+ if (is_init_val_set(hfgwtr_el2, HFGWTR_EL2_INIT_VAL, \
+ HFGWTR_EL2_NON_FEAT_DEPENDENT_MASK) == 0) {
+ return TEST_RESULT_FAIL;
+ }
+
return TEST_RESULT_SUCCESS;
#endif /* __aarch64__ */
}
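
For reference, the first CHECK_FEAT_TRAP_INITIALIZED() invocation above expands (after token pasting) roughly as follows; the *_INIT_VAL and *_MASK names are the ones the macro derives from its arguments.

do {
	if (get_feat_brbe_support() != 0) {
		if (is_init_val_set(hfgitr_el2, HFGITR_EL2_INIT_VAL,
				    HFGITR_EL2_FEAT_BRBE_MASK) == 0) {
			return TEST_RESULT_FAIL;
		}
	}
} while (false);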
diff --git a/tftf/tests/extensions/hcx/test_hcx.c b/tftf/tests/extensions/hcx/test_hcx.c
new file mode 100644
index 000000000..3621f2126
--- /dev/null
+++ b/tftf/tests/extensions/hcx/test_hcx.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <tftf_lib.h>
+#include <tftf.h>
+#include <arch_helpers.h>
+#include <arch_features.h>
+
+/* This very simple test just ensures that HCRX_EL2 access does not trap. */
+test_result_t test_feat_hcx_enabled(void)
+{
+#ifdef __aarch64__
+ u_register_t hcrx_el2;
+
+ /* Make sure FEAT_HCX is supported. */
+ if (!get_feat_hcx_support()) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Attempt to read HCRX_EL2, if not enabled this should trap to EL3. */
+ hcrx_el2 = read_hcrx_el2();
+
+ /*
+ * If we make it this far, access to HCRX_EL2 was not trapped, and
+ * therefore FEAT_HCX is supported.
+ */
+ if (hcrx_el2 == HCRX_EL2_INIT_VAL) {
+ /*
+ * If the value of the register is the reset value, the test
+ * passed.
+ */
+ return TEST_RESULT_SUCCESS;
+ }
+ /*
+ * Otherwise, the test fails, as the HCRX_EL2 register has
+ * not been initialized properly.
+ */
+ return TEST_RESULT_FAIL;
+#else
+ /* Skip test if AArch32 */
+ return TEST_RESULT_SKIPPED;
+#endif
+}
diff --git a/tftf/tests/extensions/mpam/test_mpam.c b/tftf/tests/extensions/mpam/test_mpam.c
new file mode 100644
index 000000000..eb40bc5ca
--- /dev/null
+++ b/tftf/tests/extensions/mpam/test_mpam.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/* EL3 is expected to allow access to MPAM system registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+
+test_result_t test_mpam_reg_access(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_MPAM_NOT_SUPPORTED();
+
+ read_mpamidr_el1();
+ read_mpam2_el2();
+
+ return TEST_RESULT_SUCCESS;
+#endif
+}
diff --git a/tftf/tests/extensions/pauth/test_pauth.c b/tftf/tests/extensions/pauth/test_pauth.c
index 30b78ef19..ada2f1d70 100644
--- a/tftf/tests/extensions/pauth/test_pauth.c
+++ b/tftf/tests/extensions/pauth/test_pauth.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Arm Limited. All rights reserved.
+ * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,155 +14,9 @@
#include <string.h>
#ifdef __aarch64__
-
-/* Number of ARMv8.3-PAuth keys */
-#define NUM_KEYS 5
-
-static const char * const key_name[] = {"IA", "IB", "DA", "DB", "GA"};
-
static uint128_t pauth_keys_before[NUM_KEYS];
static uint128_t pauth_keys_after[NUM_KEYS];
-
-/* Check if ARMv8.3-PAuth key is enabled */
-static bool is_pauth_key_enabled(uint64_t key_bit)
-{
- return (IS_IN_EL2() ?
- ((read_sctlr_el2() & key_bit) != 0U) :
- ((read_sctlr_el1() & key_bit) != 0U));
-}
-
-static test_result_t compare_pauth_keys(void)
-{
- test_result_t result = TEST_RESULT_SUCCESS;
-
- for (unsigned int i = 0; i < NUM_KEYS; ++i) {
- if (pauth_keys_before[i] != pauth_keys_after[i]) {
- ERROR("AP%sKey_EL1 read 0x%llx:%llx "
- "expected 0x%llx:%llx\n", key_name[i],
- (uint64_t)(pauth_keys_after[i] >> 64),
- (uint64_t)(pauth_keys_after[i]),
- (uint64_t)(pauth_keys_before[i] >> 64),
- (uint64_t)(pauth_keys_before[i]));
-
- result = TEST_RESULT_FAIL;
- }
- }
- return result;
-}
-
-/*
- * Program or read ARMv8.3-PAuth keys (if already enabled)
- * and store them in <pauth_keys_before> buffer
- */
-static void set_store_pauth_keys(void)
-{
- uint128_t plat_key;
-
- memset(pauth_keys_before, 0, NUM_KEYS * sizeof(uint128_t));
-
- if (is_armv8_3_pauth_apa_api_present()) {
- if (is_pauth_key_enabled(SCTLR_EnIA_BIT)) {
- /* Read APIAKey_EL1 */
- plat_key = read_apiakeylo_el1() |
- ((uint128_t)(read_apiakeyhi_el1()) << 64);
- INFO("EnIA is set\n");
- } else {
- /* Program APIAKey_EL1 */
- plat_key = init_apkey();
- write_apiakeylo_el1((uint64_t)plat_key);
- write_apiakeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[0] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnIB_BIT)) {
- /* Read APIBKey_EL1 */
- plat_key = read_apibkeylo_el1() |
- ((uint128_t)(read_apibkeyhi_el1()) << 64);
- INFO("EnIB is set\n");
- } else {
- /* Program APIBKey_EL1 */
- plat_key = init_apkey();
- write_apibkeylo_el1((uint64_t)plat_key);
- write_apibkeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[1] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnDA_BIT)) {
- /* Read APDAKey_EL1 */
- plat_key = read_apdakeylo_el1() |
- ((uint128_t)(read_apdakeyhi_el1()) << 64);
- INFO("EnDA is set\n");
- } else {
- /* Program APDAKey_EL1 */
- plat_key = init_apkey();
- write_apdakeylo_el1((uint64_t)plat_key);
- write_apdakeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[2] = plat_key;
-
- if (is_pauth_key_enabled(SCTLR_EnDB_BIT)) {
- /* Read APDBKey_EL1 */
- plat_key = read_apdbkeylo_el1() |
- ((uint128_t)(read_apdbkeyhi_el1()) << 64);
- INFO("EnDB is set\n");
- } else {
- /* Program APDBKey_EL1 */
- plat_key = init_apkey();
- write_apdbkeylo_el1((uint64_t)plat_key);
- write_apdbkeyhi_el1((uint64_t)(plat_key >> 64));
- }
- pauth_keys_before[3] = plat_key;
- }
-
- /*
- * It is safe to assume that Generic Pointer authentication code key
- * APGAKey_EL1 can be re-programmed, as this key is not set in
- * TF-A Test suite and PACGA instruction is not used.
- */
- if (is_armv8_3_pauth_gpa_gpi_present()) {
- /* Program APGAKey_EL1 */
- plat_key = init_apkey();
- write_apgakeylo_el1((uint64_t)plat_key);
- write_apgakeyhi_el1((uint64_t)(plat_key >> 64));
- pauth_keys_before[4] = plat_key;
- }
-
- isb();
-}
-
-/*
- * Read ARMv8.3-PAuth keys and store them in
- * <pauth_keys_after> buffer
- */
-static void read_pauth_keys(void)
-{
- memset(pauth_keys_after, 0, NUM_KEYS * sizeof(uint128_t));
-
- if (is_armv8_3_pauth_apa_api_present()) {
- /* Read APIAKey_EL1 */
- pauth_keys_after[0] = read_apiakeylo_el1() |
- ((uint128_t)(read_apiakeyhi_el1()) << 64);
-
- /* Read APIBKey_EL1 */
- pauth_keys_after[1] = read_apibkeylo_el1() |
- ((uint128_t)(read_apibkeyhi_el1()) << 64);
-
- /* Read APDAKey_EL1 */
- pauth_keys_after[2] = read_apdakeylo_el1() |
- ((uint128_t)(read_apdakeyhi_el1()) << 64);
-
- /* Read APDBKey_EL1 */
- pauth_keys_after[3] = read_apdbkeylo_el1() |
- ((uint128_t)(read_apdbkeyhi_el1()) << 64);
- }
-
- if (is_armv8_3_pauth_gpa_gpi_present()) {
- /* Read APGAKey_EL1 */
- pauth_keys_after[4] = read_apgakeylo_el1() |
- ((uint128_t)(read_apgakeyhi_el1()) << 64);
- }
-}
-#endif /* __aarch64__ */
+#endif
/*
* TF-A is expected to allow access to key registers from lower EL's,
@@ -174,7 +28,7 @@ test_result_t test_pauth_reg_access(void)
SKIP_TEST_IF_AARCH32();
#ifdef __aarch64__
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
- read_pauth_keys();
+ pauth_test_lib_read_keys(pauth_keys_before);
return TEST_RESULT_SUCCESS;
#endif /* __aarch64__ */
}
@@ -188,13 +42,11 @@ test_result_t test_pauth_leakage(void)
SKIP_TEST_IF_AARCH32();
#ifdef __aarch64__
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
- set_store_pauth_keys();
+ pauth_test_lib_read_keys(pauth_keys_before);
tftf_get_psci_version();
- read_pauth_keys();
-
- return compare_pauth_keys();
+ return pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after);
#endif /* __aarch64__ */
}
@@ -220,7 +72,6 @@ test_result_t test_pauth_instructions(void)
ARM_ARCH_MAJOR, ARM_ARCH_MINOR);
return TEST_RESULT_SKIPPED;
#endif /* ARM_ARCH_AT_LEAST(8, 3) */
-
#endif /* __aarch64__ */
}
@@ -238,7 +89,7 @@ test_result_t test_pauth_leakage_tsp(void)
SKIP_TEST_IF_PAUTH_NOT_SUPPORTED();
SKIP_TEST_IF_TSP_NOT_PRESENT();
- set_store_pauth_keys();
+ pauth_test_lib_fill_regs_and_template(pauth_keys_before);
/* Standard SMC to ADD two numbers */
tsp_svc_params.fid = TSP_STD_FID(TSP_ADD);
@@ -260,8 +111,6 @@ test_result_t test_pauth_leakage_tsp(void)
return TEST_RESULT_FAIL;
}
- read_pauth_keys();
-
- return compare_pauth_keys();
+ return pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after);
#endif /* __aarch64__ */
}
diff --git a/tftf/tests/extensions/pmuv3/test_pmuv3.c b/tftf/tests/extensions/pmuv3/test_pmuv3.c
new file mode 100644
index 000000000..725b4e716
--- /dev/null
+++ b/tftf/tests/extensions/pmuv3/test_pmuv3.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <test_helpers.h>
+
+/* These tests target AArch64; AArch32 is too different to even build. */
+#if defined(__aarch64__)
+
+#define PMU_EVT_INST_RETIRED 0x0008
+#define NOP_REPETITIONS 50
+#define MAX_COUNTERS 32
+
+static inline void read_all_counters(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccntr_el0();
+ for (int i = 0; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevcntrn_el0(i);
+ }
+}
+
+static inline void read_all_counter_configs(u_register_t *array, int impl_ev_ctrs)
+{
+ array[0] = read_pmccfiltr_el0();
+ for (int i = 0; i < impl_ev_ctrs; i++) {
+ array[i + 1] = read_pmevtypern_el0(i);
+ }
+}
+
+static inline void read_all_pmu_configs(u_register_t *array)
+{
+ array[0] = read_pmcntenset_el0();
+ array[1] = read_pmcr_el0();
+ array[2] = read_pmselr_el0();
+ array[3] = (IS_IN_EL2()) ? read_mdcr_el2() : 0;
+}
+
+static inline void enable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_E_BIT);
+ /* this function means we are about to use the PMU, synchronize */
+ isb();
+}
+
+static inline void disable_counting(void)
+{
+ write_pmcr_el0(read_pmcr_el0() & ~PMCR_EL0_E_BIT);
+ /* we also rely on disabling really having taken effect */
+ isb();
+}
+
+static inline void clear_counters(void)
+{
+ write_pmcr_el0(read_pmcr_el0() | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+}
+
+/*
+ * tftf runs in EL2, so don't bother enabling counting at lower ELs or in the
+ * secure world. TF-A has other controls for those; counting there doesn't impact us.
+ */
+static inline void enable_cycle_counter(void)
+{
+ write_pmccfiltr_el0(PMCCFILTR_EL0_NSH_BIT);
+ write_pmcntenset_el0(read_pmcntenset_el0() | PMCNTENSET_EL0_C_BIT);
+}
+
+static inline void enable_event_counter(int ctr_num)
+{
+ write_pmevtypern_el0(ctr_num, PMEVTYPER_EL0_NSH_BIT |
+ (PMU_EVT_INST_RETIRED & PMEVTYPER_EL0_EVTCOUNT_BITS));
+ write_pmcntenset_el0(read_pmcntenset_el0() |
+ PMCNTENSET_EL0_P_BIT(ctr_num));
+}
+
+/* doesn't really matter what happens, as long as it happens a lot */
+static inline void execute_nops(void)
+{
+ for (int i = 0; i < NOP_REPETITIONS; i++) {
+ __asm__ ("orr x0, x0, x0\n");
+ }
+}
+
+static inline void execute_el3_nop(void)
+{
+ /* ask EL3 for some info, no side effects */
+ smc_args args = { SMCCC_VERSION };
+
+ /* return values don't matter */
+ tftf_smc(&args);
+}
+
+#endif /* defined(__aarch64__) */
+
+/*
+ * try the cycle counter with some NOPs to see if it works
+ */
+test_result_t test_pmuv3_cycle_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t ccounter_start;
+ u_register_t ccounter_end;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ enable_cycle_counter();
+ enable_counting();
+
+ ccounter_start = read_pmccntr_el0();
+ execute_nops();
+ ccounter_end = read_pmccntr_el0();
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ ccounter_start, ccounter_end);
+ if (ccounter_start != ccounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
+
+/*
+ * try an event counter with some NOPs to see if it works. MDCR_EL2.HPMN can
+ * make this tricky so take extra care.
+ */
+test_result_t test_pmuv3_event_works_ns(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t evcounter_start;
+ u_register_t evcounter_end;
+ u_register_t mdcr_el2 = ~0;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ /* use the real value or use the dummy value to skip checks later */
+ if (IS_IN_EL2()) {
+ mdcr_el2 = read_mdcr_el2();
+ }
+
+ if (((read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK) == 0) {
+ tftf_testcase_printf("No event counters implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* FEAT_HPMN0 only affects event counters */
+ if ((mdcr_el2 & MDCR_EL2_HPMN_MASK) == 0) {
+ if (!get_feat_hpmn0_supported()) {
+ tftf_testcase_printf(
+ "FEAT_HPMN0 not implemented but HPMN is 0\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* the test will fail in this case */
+ if ((mdcr_el2 & MDCR_EL2_HPME_BIT) == 0) {
+ tftf_testcase_printf(
+ "HPMN is 0 and HPME is not set!\n");
+ }
+ }
+
+ enable_event_counter(0);
+ enable_counting();
+
+ /*
+ * If any counter is enabled it will be the very first one: HPME can
+ * disable the higher end of the counters and HPMN can put the
+ * boundary anywhere.
+ */
+ evcounter_start = read_pmevcntrn_el0(0);
+ execute_nops();
+ evcounter_end = read_pmevcntrn_el0(0);
+ disable_counting();
+ clear_counters();
+
+ tftf_testcase_printf("Counted from %ld to %ld\n",
+ evcounter_start, evcounter_end);
+ if (evcounter_start != evcounter_end) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+#endif /* defined(__aarch64__) */
+}
+
+
+/*
+ * check if entering/exiting EL3 (with a NOP) preserves all PMU registers.
+ */
+test_result_t test_pmuv3_el3_preserves(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#if defined(__aarch64__)
+ u_register_t ctr_start[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_start[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_start[4];
+ u_register_t ctr_end[MAX_COUNTERS] = {0};
+ u_register_t ctr_cfg_end[MAX_COUNTERS] = {0};
+ u_register_t pmu_cfg_end[4];
+ int impl_ev_ctrs;
+
+ SKIP_TEST_IF_PMUV3_NOT_SUPPORTED();
+
+ impl_ev_ctrs = (read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK;
+
+ /* start from 0 so we know we can't overflow */
+ clear_counters();
+ /* pretend counters have just been used */
+ enable_cycle_counter();
+ enable_event_counter(0);
+ enable_counting();
+ execute_nops();
+ disable_counting();
+
+ /* get before reading */
+ read_all_counters(ctr_start, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_start, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_start);
+
+ /* give EL3 a chance to scramble everything */
+ execute_el3_nop();
+
+ /* get after reading */
+ read_all_counters(ctr_end, impl_ev_ctrs);
+ read_all_counter_configs(ctr_cfg_end, impl_ev_ctrs);
+ read_all_pmu_configs(pmu_cfg_end);
+
+ if (memcmp(ctr_start, ctr_end, sizeof(ctr_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve counters\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (memcmp(ctr_cfg_start, ctr_cfg_end, sizeof(ctr_cfg_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve counter config\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (memcmp(pmu_cfg_start, pmu_cfg_end, sizeof(pmu_cfg_start)) != 0) {
+ tftf_testcase_printf("SMC call did not preserve PMU registers\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#endif /* defined(__aarch64__) */
+}
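
The HPMN/HPME comments above describe which event counters are governed by PMCR_EL0.E (the enable this test programs) rather than by MDCR_EL2.HPME. A rough helper sketch of that split, reusing only the field macros already referenced in this file; the helper itself is illustrative and not part of the test.

/*
 * Number of event counters whose enable is governed by PMCR_EL0.E, i.e.
 * counters [0 .. HPMN-1]; the remaining counters up to N-1 are governed by
 * MDCR_EL2.HPME instead. Illustrative sketch only.
 */
static inline int pmcr_governed_event_counters(void)
{
	int n = (read_pmcr_el0() >> PMCR_EL0_N_SHIFT) & PMCR_EL0_N_MASK;
	int hpmn = IS_IN_EL2() ? (int)(read_mdcr_el2() & MDCR_EL2_HPMN_MASK) : n;

	return (hpmn < n) ? hpmn : n;
}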
diff --git a/tftf/tests/extensions/rng_trap/test_rng_trap.c b/tftf/tests/extensions/rng_trap/test_rng_trap.c
new file mode 100644
index 000000000..49ee6adfc
--- /dev/null
+++ b/tftf/tests/extensions/rng_trap/test_rng_trap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <test_helpers.h>
+#include <tftf.h>
+#include <tftf_lib.h>
+
+#define MAX_ITERATIONS_EXCLUSIVE 100
+
+/*
+ * This test ensures that the RNDR/RNDRRS instructions cause a trap to EL3 and
+ * generate a new random number each time.
+ * The "use_rndrrs" argument selects whether to execute the "rndrrs" or "rndr"
+ * instruction.
+ * This test uses load/store exclusive pairs to detect whether the execution
+ * trapped to EL3 or not.
+ * It relies on the fact that a change of exception level clears the monitor.
+ * In a load/store exclusive pair, the stxr instruction updates its status
+ * to '1' when the monitor has been cleared.
+ * The test starts with ldxr, executes the trapping instruction and, if the trap
+ * to EL3 happened, the stxr status will be '1'. To avoid the (highly unlikely)
+ * chance of the monitor being cleared for any other reason, the test is run
+ * iteratively. If stxr succeeds even a single time, no trap has happened.
+ */
+static test_result_t test_rng_trap(bool use_rndrrs)
+{
+#if defined __aarch64__
+ u_register_t rng, rng1 = 0;
+ u_register_t exclusive;
+ u_register_t status;
+ unsigned int i;
+
+ /* Make sure FEAT_RNG_TRAP is supported. */
+ SKIP_TEST_IF_RNG_TRAP_NOT_SUPPORTED();
+
+ /*
+ * The test body runs in a loop a safe number of times in order to
+ * discard any possible monitor clears other than the RNG trap.
+ */
+ for (i = 0; i < MAX_ITERATIONS_EXCLUSIVE; i++) {
+ /* Attempt to acquire address for exclusive access */
+ __asm__ volatile ("ldxr %0, %1\n" : "=r"(rng)
+ : "Q"(exclusive));
+ if (use_rndrrs) {
+ /* Attempt to read RNDRRS. */
+ __asm__ volatile ("mrs %0, rndrrs\n" : "=r" (rng));
+ } else {
+ /* Attempt to read RNDR. */
+ __asm__ volatile ("mrs %0, rndr\n" : "=r" (rng));
+ }
+ /*
+ * After returning from the trap, the monitor variable should
+ * be cleared, so the status value should be 1.
+ */
+ __asm__ volatile ("stxr %w0, %1, %2\n" : "=&r"(status)
+ : "r"(rng), "Q"(exclusive));
+ /* If monitor is not cleared or not a new random number */
+ if ((status == 0) || (rng == rng1)) {
+ return TEST_RESULT_FAIL;
+ }
+ rng1 = rng;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#else
+ /* Skip test if AArch32 */
+ SKIP_TEST_IF_AARCH32();
+#endif
+}
+
+/* Test RNDR read access causes a trap to EL3 and generates a random number each time */
+test_result_t test_rndr_rng_trap(void)
+{
+ return test_rng_trap(false);
+}
+
+/* Test RNDRRS read access causes a trap to EL3 and generates a random number each time */
+test_result_t test_rndrrs_rng_trap(void)
+{
+ return test_rng_trap(true);
+}
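
The exclusive-monitor technique in the comment above is not specific to RNDR: any instruction suspected of trapping to EL3 can be bracketed by ldxr/stxr, since the exception-level change clears the local monitor and forces the stxr status to 1. A stripped-down sketch of a single iteration of the pattern, with the probed instruction left as a placeholder and illustrative names.

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true if the probed instruction (a NOP placeholder here) caused an
 * exception-level change between ldxr and stxr, i.e. stxr reported status 1.
 * One iteration only; the real test loops to rule out other monitor clears.
 */
static bool probed_instruction_trapped(void)
{
	uint64_t exclusive = 0;
	uint64_t val;
	uint64_t status;

	/* Open the exclusive monitor on 'exclusive'. */
	__asm__ volatile("ldxr %0, %1\n" : "=r"(val) : "Q"(exclusive));

	/* Placeholder for the instruction expected to trap to EL3. */
	__asm__ volatile("nop\n");

	/* status == 0 means the store succeeded, so no trap happened. */
	__asm__ volatile("stxr %w0, %1, %2\n" : "=&r"(status)
			 : "r"(val), "Q"(exclusive));

	return status != 0;
}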
diff --git a/tftf/tests/extensions/sme/test_sme.c b/tftf/tests/extensions/sme/test_sme.c
new file mode 100644
index 000000000..39c64571b
--- /dev/null
+++ b/tftf/tests/extensions/sme/test_sme.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/sme.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+
+/* Global buffers*/
+static __aligned(16) uint64_t ZA_In_vector[8] = {0xaaff, 0xbbff, 0xccff, 0xddff, 0xeeff,
+ 0xffff, 0xff00, 0xff00};
+static __aligned(16) uint64_t ZA_Out_vector[8] = {0};
+
+/**
+ * sme_zero_ZA
+ * ZERO: Zero a list of up to eight 64-bit element ZA tiles.
+ * ZERO {<mask>}, with mask = 0xff, clears all eight 64-bit element tiles.
+ */
+static void sme_zero_ZA(void)
+{
+ /**
+ * Due to the lack of support from the toolchain, instruction
+ * opcodes are used here.
+ * Manually encoded instruction to zero all the tiles of the ZA array.
+ *
+ * TODO: Further, once the toolchain adds support for SME features
+ * this could be replaced with the actual instruction ZERO { <mask>}.
+ */
+ asm volatile(".inst 0xc008000f" : : : );
+}
+
+/**
+ * This function compares two buffers/vector elements
+ * Inputs: uint64_t *ZA_In_vector, ZA_Out_vector
+ * @return true : If both are equal
+ * @return false : If both are not equal
+ */
+static bool sme_cmp_vector(const uint64_t *ZA_In_vector, const uint64_t *ZA_Out_vector)
+{
+ bool ret = true;
+
+ for (int i = 0; i < (MAX_VL_B/8); i++) {
+ if (ZA_In_vector[i] != ZA_Out_vector[i]) {
+ ret = false;
+ }
+ }
+
+ return ret;
+}
+
+#endif /* __aarch64__ */
+
+test_result_t test_sme_support(void)
+{
+ /* SME is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ u_register_t reg;
+ unsigned int current_vector_len;
+ unsigned int requested_vector_len;
+ unsigned int len_max;
+ unsigned int __unused svl_max = 0U;
+ u_register_t saved_smcr;
+
+ /* Skip the test if SME is not supported. */
+ SKIP_TEST_IF_SME_NOT_SUPPORTED();
+
+ /* Make sure TPIDR2_EL0 is accessible. */
+ write_tpidr2_el0(0);
+ if (read_tpidr2_el0() != 0) {
+ ERROR("Could not read TPIDR2_EL0.\n");
+ return TEST_RESULT_FAIL;
+ }
+ write_tpidr2_el0(0xb0bafe77);
+ if (read_tpidr2_el0() != 0xb0bafe77) {
+ ERROR("Could not write TPIDR2_EL0.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Iterate through values for LEN to detect supported vector lengths.
+ */
+
+ /* Entering Streaming SVE mode */
+ sme_smstart(SMSTART_SM);
+
+ saved_smcr = read_smcr_el2();
+
+ /* Write SMCR_EL2 with the LEN max to find implemented width. */
+ write_smcr_el2(MASK(SMCR_ELX_RAZ_LEN));
+ isb();
+
+ len_max = (unsigned int)read_smcr_el2();
+ VERBOSE("Maximum SMCR_EL2.LEN value: 0x%x\n", len_max);
+ VERBOSE("Enumerating supported vector lengths...\n");
+ for (unsigned int i = 0; i <= len_max; i++) {
+ /* Load new value into SMCR_EL2.RAZ_LEN */
+ reg = read_smcr_el2();
+ reg &= ~(MASK(SMCR_ELX_RAZ_LEN));
+ reg |= INPLACE(SMCR_ELX_RAZ_LEN, i);
+ write_smcr_el2(reg);
+ isb();
+
+ /* Compute current and requested vector lengths in bits. */
+ current_vector_len = ((unsigned int)sme_rdsvl_1() * 8U);
+ requested_vector_len = (i + 1U) * 128U;
+
+ /*
+ * We count down from the maximum SMLEN value, so if the values
+ * match, we've found the largest supported value for SMLEN.
+ */
+ if (current_vector_len == requested_vector_len) {
+ svl_max = current_vector_len;
+ VERBOSE("SUPPORTED: %u bits (LEN=%u)\n",
+ requested_vector_len, i);
+ } else {
+ VERBOSE("NOT SUPPORTED: %u bits (LEN=%u)\n",
+ requested_vector_len, i);
+ }
+ }
+
+ INFO("Largest Supported Streaming Vector Length(SVL): %u bits\n",
+ svl_max);
+
+ /* Exiting Streaming SVE mode */
+ sme_smstop(SMSTOP_SM);
+
+ /**
+ * Perform/Execute SME Instructions.
+ * The SME data-processing instructions LDR, STR and ZERO that access
+ * the SME ZA storage are legal only if ZA is enabled.
+ */
+
+ /* Enable SME ZA Array Storage */
+ sme_smstart(SMSTART_ZA);
+
+ /* LDR : Load vector to ZA Array */
+ sme_vector_to_ZA(ZA_In_vector);
+
+ /* STR : Store Vector from ZA Array. */
+ sme_ZA_to_vector(ZA_Out_vector);
+
+ /* Compare both vectors to ensure load and store instructions have
+ * executed precisely.
+ */
+ if (!sme_cmp_vector(ZA_In_vector, ZA_Out_vector)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Zero or clear the entire ZA Array Storage/Tile */
+ sme_zero_ZA();
+
+ /* Disable the SME ZA array storage. */
+ sme_smstop(SMSTOP_ZA);
+
+ /* If FEAT_SME_FA64 then attempt to execute an illegal instruction. */
+ if (is_feat_sme_fa64_supported()) {
+ VERBOSE("FA64 supported, trying illegal instruction.\n");
+ sme_try_illegal_instruction();
+ }
+
+ write_smcr_el2(saved_smcr);
+ isb();
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
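
The enumeration loop above relies on the SMCR_ELx.LEN encoding used to compute requested_vector_len, where the streaming vector length in bits is (LEN + 1) * 128. A minimal sketch of the conversion in both directions; the helper names are illustrative.

#include <stdint.h>

/*
 * Streaming vector length in bits for a given SMCR_ELx.LEN value,
 * e.g. LEN = 0 -> 128 bits, LEN = 3 -> 512 bits.
 */
static inline unsigned int svl_bits_from_len(unsigned int len)
{
	return (len + 1U) * 128U;
}

/* SMCR_ELx.LEN value needed to request a vector length of 'svl_bits' bits. */
static inline unsigned int len_from_svl_bits(unsigned int svl_bits)
{
	return (svl_bits / 128U) - 1U;
}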
diff --git a/tftf/tests/extensions/sme/test_sme2.c b/tftf/tests/extensions/sme/test_sme2.c
new file mode 100644
index 000000000..e82da08ee
--- /dev/null
+++ b/tftf/tests/extensions/sme/test_sme2.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <lib/extensions/sme.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+
+#define SME2_ARRAYSIZE (512/64)
+#define SME2_INPUT_DATA (0x1fffffffffffffff)
+
+/* Global buffers */
+static __aligned(16) uint64_t sme2_input_buffer[SME2_ARRAYSIZE] = {0};
+static __aligned(16) uint64_t sme2_output_buffer[SME2_ARRAYSIZE] = {0};
+
+/*
+ * clear_ZT0: ZERO all bytes of the ZT0 register.
+ *
+ */
+static void clear_ZT0(void)
+{
+ /**
+ * Due to the lack of support from the toolchain, instruction
+ * opcodes are used here.
+ * TODO: Further, once the toolchain adds support for SME features
+ * this could be replaced with the instruction ZERO {ZT0}.
+ */
+ asm volatile(".inst 0xc0480001" : : : );
+}
+
+#endif /* __aarch64__ */
+
+/*
+ * test_sme2_support: Test SME2 support when the extension is enabled.
+ *
+ * Execute some SME2 instructions. These should not be trapped to EL3,
+ * as TF-A is responsible for enabling SME2 for Non-secure world.
+ *
+ */
+test_result_t test_sme2_support(void)
+{
+ /* SME2 is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ /* Skip the test if SME2 is not supported. */
+ SKIP_TEST_IF_SME2_NOT_SUPPORTED();
+
+ /*
+ * FEAT_SME2 adds a 512-bit architectural register, ZT0, to support
+ * the lookup-table feature.
+ * The SMCR_ELx system register defines the SMCR_ELx.EZT0 bit [30] to
+ * enable/disable access to this register. SMCR_EL2_RESET_VAL enables
+ * this bit by default.
+ *
+ * Instructions accessing the ZT0 register are tested to ensure the
+ * SMCR_EL3.EZT0 bit is set by the EL3 firmware so that EL2 accesses
+ * are not trapped.
+ */
+
+ /* Make sure we can access SME2 ZT0 storage, PSTATE.ZA = 1 */
+ VERBOSE("Enabling SME ZA storage and ZT0 storage.\n");
+
+ sme_smstart(SMSTART_ZA);
+
+ /*
+ * LDR (ZT0) : Load ZT0 register.
+ * Load the 64-byte ZT0 register from the memory address
+ * provided in the 64-bit scalar base register.
+ */
+ for (int i = 0; i < SME2_ARRAYSIZE; i++) {
+ sme2_input_buffer[i] = SME2_INPUT_DATA;
+ }
+ sme2_load_zt0_instruction(sme2_input_buffer);
+
+ /*
+ * STR (ZT0) : Store ZT0 register.
+ * Store the 64-byte ZT0 register to the memory address
+ * provided in the 64-bit scalar base register
+ */
+
+ sme2_store_zt0_instruction(sme2_output_buffer);
+
+ /**
+ * Compare the input and output buffers to verify the operation of
+ * the LDR and STR instructions with the ZT0 register.
+ */
+ for (int i = 0; i < SME2_ARRAYSIZE; i++) {
+ if (sme2_input_buffer[i] != sme2_output_buffer[i]) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* ZER0 (ZT0) */
+ clear_ZT0();
+
+ /* Finally, disable the access to SME2 ZT0 storage, PSTATE.ZA = 0 */
+ VERBOSE("Disabling SME ZA storage and ZT0 storage.\n");
+
+ sme_smstop(SMSTOP_ZA);
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/spe/test_spe.c b/tftf/tests/extensions/spe/test_spe.c
new file mode 100644
index 000000000..d0d89ef50
--- /dev/null
+++ b/tftf/tests/extensions/spe/test_spe.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+test_result_t test_spe_support(void)
+{
+ /* SPE is an AArch64-only feature.*/
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ unsigned int spe_ver = spe_get_version();
+
+ assert(spe_ver <= ID_AA64DFR0_SPE_V1P4);
+
+ if (spe_ver == ID_AA64DFR0_SPE_NOT_SUPPORTED) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * If the EL3 runtime firmware does not enable access to the SPE
+ * system registers from NS-EL2/NS-EL1, then reads of these
+ * registers trap to EL3.
+ */
+ read_pmscr_el1();
+ read_pmsfcr_el1();
+ read_pmsicr_el1();
+ read_pmsidr_el1();
+ read_pmsirr_el1();
+ read_pmslatfr_el1();
+ read_pmblimitr_el1();
+ read_pmbptr_el1();
+ read_pmbsr_el1();
+ read_pmsevfr_el1();
+ if (IS_IN_EL2()) {
+ read_pmscr_el2();
+ }
+ if (spe_ver == ID_AA64DFR0_SPE_V1P2) {
+ read_pmsnevfr_el1();
+ }
+
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/sve/sve_operations.S b/tftf/tests/extensions/sve/sve_operations.S
deleted file mode 100644
index e528b2bfe..000000000
--- a/tftf/tests/extensions/sve/sve_operations.S
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <asm_macros.S>
-
-#include "./test_sve.h"
-
-#ifdef __aarch64__
-#if __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0)
-
-/*
- * Based on example code from the Arm Compiler Scalable Vector Extension User
- * Guide[1].
- * [1] https://developer.arm.com/docs/100891/latest/getting-started-with-the-sve-compiler/compiling-c-and-c-code-for-sve-enabled-targets
- */
-
- .arch armv8.2-a+crc+fp16+sve
- .global sve_subtract_arrays
-func sve_subtract_arrays
- mov x4, SVE_ARRAYSIZE
- mov x5, x4
- mov x3, 0
- whilelo p0.s, xzr, x4
-.loop:
- ld1w z0.s, p0/z, [x1, x3, lsl 2]
- ld1w z1.s, p0/z, [x2, x3, lsl 2]
- sub z0.s, z0.s, z1.s
- st1w z0.s, p0, [x0, x3, lsl 2]
- incw x3
- whilelo p0.s, x3, x5
- bne .loop
- ret
-endfunc sve_subtract_arrays
-
-#endif /* __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0) */
-#endif /* __aarch64__ */
diff --git a/tftf/tests/extensions/sve/test_sve.c b/tftf/tests/extensions/sve/test_sve.c
index 235e2b8e3..bdd76e180 100644
--- a/tftf/tests/extensions/sve/test_sve.c
+++ b/tftf/tests/extensions/sve/test_sve.c
@@ -8,15 +8,14 @@
#include <arch_helpers.h>
#include <debug.h>
#include <stdlib.h>
+#include <test_helpers.h>
#include <tftf_lib.h>
+#include <lib/extensions/sve.h>
#include "./test_sve.h"
#if __GNUC__ > 8 || (__GNUC__ == 8 && __GNUC_MINOR__ > 0)
-extern void sve_subtract_arrays(int *difference, const int *sve_op_1,
- const int *sve_op_2);
-
static int sve_difference[SVE_ARRAYSIZE];
static int sve_op_1[SVE_ARRAYSIZE];
static int sve_op_2[SVE_ARRAYSIZE];
@@ -32,11 +31,7 @@ static int sve_op_2[SVE_ARRAYSIZE];
*/
test_result_t test_sve_support(void)
{
- /* Check if SVE is implemented and usable */
- if (is_armv8_2_sve_present() == false) {
- tftf_testcase_printf("SVE support absent\n");
- return TEST_RESULT_SKIPPED;
- }
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
for (int i = 0; i < SVE_ARRAYSIZE; i++) {
/* Generate a random number between 200 and 299 */
@@ -46,7 +41,7 @@ test_result_t test_sve_support(void)
}
/* Perform SVE operations */
- sve_subtract_arrays(sve_difference, sve_op_1, sve_op_2);
+ sve_subtract_arrays(sve_difference, sve_op_1, sve_op_2, SVE_ARRAYSIZE);
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c
new file mode 100644
index 000000000..6c28c8718
--- /dev/null
+++ b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+#include "./test_sys_reg_trace.h"
+
+static uint32_t get_trace_arch_ver(void)
+{
+ uint32_t val = read_trcdevarch();
+ val = (val >> TRCDEVARCH_ARCHVER_SHIFT) & TRCDEVARCH_ARCHVER_MASK;
+
+ return val;
+}
+
+/*
+ * EL3 is expected to allow access to trace system registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_sys_reg_trace_enabled(void)
+{
+ SKIP_TEST_IF_SYS_REG_TRACE_NOT_SUPPORTED();
+
+ /*
+ * Read a few ETMv4 system trace registers to verify that correct
+ * access has been provided from EL3.
+ */
+ uint32_t trace_arch_ver __unused = get_trace_arch_ver();
+ read_trcauxctlr();
+ read_trcccctlr();
+ read_trcbbctlr();
+ read_trcclaimset();
+ read_trcclaimclr();
+
+ /*
+ * Read a few ETE system trace registers to verify that correct
+ * access has been provided from EL3. ETE system trace register
+ * accesses are not possible from NS-EL2 in the AArch32 state.
+ */
+#if __aarch64__
+ if (trace_arch_ver == TRCDEVARCH_ARCHVER_ETE) {
+ read_trcrsr();
+ read_trcextinselr0();
+ read_trcextinselr1();
+ read_trcextinselr2();
+ read_trcextinselr3();
+ }
+#endif /* __aarch64__ */
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h
new file mode 100644
index 000000000..640b82c28
--- /dev/null
+++ b/tftf/tests/extensions/sys_reg_trace/test_sys_reg_trace.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TEST_SYS_REG_TRACE_H
+#define TEST_SYS_REG_TRACE_H
+
+/* TRCDEVARCH definitions */
+#define TRCDEVARCH_ARCHVER_SHIFT U(12)
+#define TRCDEVARCH_ARCHVER_MASK U(0xf)
+#define TRCDEVARCH_ARCHVER_ETE U(0x5)
+
+#endif /* TEST_SYS_REG_TRACE_H */
diff --git a/tftf/tests/extensions/trbe/test_trbe.c b/tftf/tests/extensions/trbe/test_trbe.c
new file mode 100644
index 000000000..8ef9576e9
--- /dev/null
+++ b/tftf/tests/extensions/trbe/test_trbe.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to trace control registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_trbe_enabled(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_TRBE_NOT_SUPPORTED();
+ read_trblimitr_el1();
+ read_trbptr_el1();
+ read_trbbaser_el1();
+ read_trbsr_el1();
+ read_trbmar_el1();
+ read_trbtrg_el1();
+ read_trbidr_el1();
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/extensions/trf/test_trf.c b/tftf/tests/extensions/trf/test_trf.c
new file mode 100644
index 000000000..eeb967db8
--- /dev/null
+++ b/tftf/tests/extensions/trf/test_trf.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <tftf.h>
+
+/*
+ * EL3 is expected to allow access to trace filter control registers from EL2.
+ * Reading these registers will trap to EL3 and crash when EL3 has not
+ * allowed access.
+ */
+test_result_t test_trf_enabled(void)
+{
+ SKIP_TEST_IF_TRF_NOT_SUPPORTED();
+
+#ifdef __aarch64__
+ read_trfcr_el1();
+ read_trfcr_el2();
+#else
+ read_htrfcr();
+ read_trfcr();
+#endif /* __aarch64__ */
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/extensions/wfxt/test_wfxt.c b/tftf/tests/extensions/wfxt/test_wfxt.c
new file mode 100644
index 000000000..bb3e4866d
--- /dev/null
+++ b/tftf/tests/extensions/wfxt/test_wfxt.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+typedef enum {
+ EXEC_WFIT = 0,
+ EXEC_WFET
+} exec_wfxt;
+
+#ifdef __aarch64__
+static test_result_t test_wfxt_inst(exec_wfxt val, uint64_t ms)
+{
+ __asm__ volatile(".arch armv8.7-a");
+ uint64_t timer_cnt1, timer_cnt2, feed_cnt;
+ uint64_t timer_freq = read_cntfrq_el0();
+ uint64_t ms_to_counts = ((ms * timer_freq) / 1000U);
+
+ timer_cnt1 = virtualcounter_read();
+ feed_cnt = timer_cnt1 + ms_to_counts;
+
+ if (val == EXEC_WFIT) {
+ wfit(feed_cnt);
+ } else {
+ wfet(feed_cnt);
+ }
+
+ timer_cnt2 = virtualcounter_read();
+
+ /* Elapsed time should be at least equal to the sleep time */
+ if ((timer_cnt2 - timer_cnt1) >= ms_to_counts) {
+ return TEST_RESULT_SUCCESS;
+ } else {
+ /* unlikely to end up here */
+ uint64_t lapsed_ms = ((timer_cnt2 - timer_cnt1) * 1000) / timer_freq;
+
+ ERROR("Time elapsed: actual(%llu)ms vs requested(%llu)ms \n",
+ lapsed_ms, ms);
+ return TEST_RESULT_FAIL;
+ }
+}
+#endif /* __aarch64__ */
+
+test_result_t test_wfet_instruction(void)
+{
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_WFXT_NOT_SUPPORTED();
+
+ /*
+ * The first invocation of wfe returns immediately, clearing the
+ * event register.
+ */
+ sevl();
+ wfe();
+
+ return test_wfxt_inst(EXEC_WFET, 10);
+#endif /* __aarch64__ */
+}
+
+test_result_t test_wfit_instruction(void)
+{
+ test_result_t ret;
+
+ SKIP_TEST_IF_AARCH32();
+
+#ifdef __aarch64__
+ SKIP_TEST_IF_WFXT_NOT_SUPPORTED();
+
+ /* disable irqs so wfit runs until the timeout */
+ disable_irq();
+
+ ret = test_wfxt_inst(EXEC_WFIT, 10);
+
+ /* enable irq back */
+ enable_irq();
+#endif /* __aarch64__ */
+
+ return ret;
+}
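
The ms_to_counts conversion above turns the requested delay into an absolute counter deadline for wfit()/wfet(). A small sketch of the same arithmetic with a worked example in the comment; the 100 MHz frequency is an assumption for illustration only.

#include <stdint.h>

/*
 * Absolute deadline to pass to WFIT/WFET for a delay of 'ms' milliseconds.
 * Example: with CNTFRQ_EL0 = 100000000 (100 MHz), a 10 ms delay adds
 * 10 * 100000000 / 1000 = 1000000 counts to the current counter value.
 */
static inline uint64_t wfxt_deadline(uint64_t now, uint64_t ms, uint64_t cntfrq)
{
	return now + ((ms * cntfrq) / 1000U);
}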
diff --git a/tftf/tests/misc_tests/inject_serror.S b/tftf/tests/misc_tests/inject_ras_error.S
index d42441dd3..1798a9049 100644
--- a/tftf/tests/misc_tests/inject_serror.S
+++ b/tftf/tests/misc_tests/inject_ras_error.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,8 +10,8 @@
#ifdef __aarch64__
- .globl inject_serror
- .globl inject_uncontainable
+ .globl inject_unrecoverable_ras_error
+ .globl inject_uncontainable_ras_error
.globl serror_sdei_event_handler
/*
@@ -21,13 +21,8 @@
* x0: Fault record number to program
* x1: Injected fault properties
* x2: Type of error to be generated
- * x3: Memory location to wait for, or 0 if no waiting is required
*/
-func inject_serror_record
- /* Clear SError received flag if necessary */
- cbz x3, 1f
- str xzr, [x3, #0]
-1:
+func inject_ras_error_record
/* Choose Error record 0 on the PE */
msr ERRSELR_EL1, x0
isb
@@ -45,25 +40,11 @@ func inject_serror_record
msr ERXPFGCTL_EL1, x2
isb
- /* If no waiting is required, jump to end */
- cbz x3, 3f
-
- sevl
-
-2:
- wfe
- ldr x0, [x3, #0]
- cbz x0, 2b
-
-3:
ret
-endfunc inject_serror_record
+endfunc inject_ras_error_record
-/*
- * Inject Unrecoverable error through fault record 0. Wait until serror_received
- * is set by the SDEI handler in response to receving the event.
- */
-func inject_serror
+/* Inject Unrecoverable error through fault record 0. */
+func inject_unrecoverable_ras_error
/* Inject fault into record 0 */
mov x0, #0
@@ -74,18 +55,14 @@ func inject_serror
/* Injected fault control */
mov x2, #ERXPFGCTL_UEU_BIT
- /* Wait address */
- adrp x3, serror_received
- add x3, x3, :lo12:serror_received
-
- b inject_serror_record
-endfunc inject_serror
+ b inject_ras_error_record
+endfunc inject_unrecoverable_ras_error
/*
* Inject Uncontainable error through fault record 0. This function doesn't wait
* as the handling is terminal in EL3.
*/
-func inject_uncontainable
+func inject_uncontainable_ras_error
/* Inject fault into record 0 */
mov x0, #0
@@ -94,18 +71,15 @@ func inject_uncontainable
/* Injected fault control */
mov x2, #ERXPFGCTL_UC_BIT
- /* Nothing to wait for */
- mov x3, xzr
-
- b inject_serror_record
-endfunc inject_uncontainable
+ b inject_ras_error_record
+endfunc inject_uncontainable_ras_error
/*
* SDEI event handler for SErrors.
*/
func serror_sdei_event_handler
stp x29, x30, [sp, #-16]!
- bl serror_handler
+ bl sdei_handler
ldp x29, x30, [sp], #16
mov_imm x0, SDEI_EVENT_COMPLETE
mov x1, xzr
diff --git a/tftf/tests/misc_tests/test_ea_ffh.c b/tftf/tests/misc_tests/test_ea_ffh.c
new file mode 100644
index 000000000..911962e1a
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ea_ffh.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <mmio.h>
+#include <tftf_lib.h>
+#include <smccc.h>
+#include <xlat_tables_v2.h>
+
+#define TEST_ADDRESS UL(0x7FFFF000)
+
+/*
+ * The purpose of these tests is to ensure that External Aborts (EA) from a
+ * lower EL are trapped to and handled in EL3.
+ *
+ * They test the HANDLE_EA_EL3_FIRST_NS feature (SCR_EL3.EA = 1) of TF-A and
+ * work in conjunction with the PLATFORM_TEST_EA_FFH macro in TF-A.
+ */
+
+/*
+ * This test maps non-existent memory as Device memory and reads from it.
+ * The access causes a bus error which traps as a synchronous External Abort.
+ */
+test_result_t test_inject_syncEA(void)
+{
+ int rc;
+
+ rc = mmap_add_dynamic_region(TEST_ADDRESS, TEST_ADDRESS, PAGE_SIZE,
+ MT_DEVICE | MT_RO | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mapping address %lu(%d) failed\n",
+ __LINE__, TEST_ADDRESS, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Try reading the invalid address, which causes an exception that is handled
+ * in EL3. After handling the exception, EL3 returns to the next instruction
+ * to avoid continuous exceptions.
+ */
+ rc = mmio_read_32(TEST_ADDRESS);
+
+ rc = mmap_remove_dynamic_region(TEST_ADDRESS, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This test maps non-existent memory as Device memory and writes to it.
+ * The access causes a bus error which traps as an SError.
+ */
+test_result_t test_inject_serror(void)
+{
+ int rc;
+
+ rc = mmap_add_dynamic_region(TEST_ADDRESS, TEST_ADDRESS, PAGE_SIZE,
+ MT_DEVICE | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mapping address %lu(%d) failed\n",
+ __LINE__, TEST_ADDRESS, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Try writing to invalid address */
+ mmio_write_32(TEST_ADDRESS, 1);
+
+ rc = mmap_remove_dynamic_region(TEST_ADDRESS, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/misc_tests/test_firmware_handoff.c b/tftf/tests/misc_tests/test_firmware_handoff.c
new file mode 100644
index 000000000..bd565aefa
--- /dev/null
+++ b/tftf/tests/misc_tests/test_firmware_handoff.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2023, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <transfer_list.h>
+
+extern u_register_t hw_config_base;
+extern u_register_t ns_tl;
+extern u_register_t tl_signature;
+
+#define DTB_PREAMBLE U(0xedfe0dd0)
+
+test_result_t test_handoff_header(void)
+{
+ struct transfer_list_header *tl = (struct transfer_list_header *)ns_tl;
+
+ assert((uint32_t)tl_signature ==
+ (REGISTER_CONVENTION_VERSION_MASK | TRANSFER_LIST_SIGNATURE));
+
+ if (transfer_list_check_header(tl) == TL_OPS_NON) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_handoff_dtb_payload(void)
+{
+ tftf_testcase_printf("Validating HW_CONFIG from transfer list.\n");
+ struct transfer_list_header *tl = (struct transfer_list_header *)ns_tl;
+ struct transfer_list_entry *te = (void *)tl + tl->hdr_size;
+ uintptr_t dtb_ptr;
+
+ te = transfer_list_find(tl, TL_TAG_FDT);
+
+ if (te == NULL) {
+ tftf_testcase_printf(
+ "Failed to find HW CONFIG TE in transfer list!");
+ return TEST_RESULT_FAIL;
+ }
+
+ dtb_ptr = (unsigned long)transfer_list_entry_data(te);
+
+ if ((dtb_ptr != hw_config_base) &&
+ (*(uint32_t *)dtb_ptr != DTB_PREAMBLE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
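
The two tests above exercise the transfer-list helpers separately; a compact sketch of the combined lookup path follows, using only the helpers and the TL_TAG_FDT tag already referenced in this file. The wrapper function itself is illustrative.

#include <stddef.h>

#include <transfer_list.h>

/*
 * Returns a pointer to the HW_CONFIG DTB carried in the transfer list 'tl',
 * or NULL if the list header is invalid or no FDT entry is present.
 */
static void *find_hw_config_dtb(struct transfer_list_header *tl)
{
	struct transfer_list_entry *te;

	if (transfer_list_check_header(tl) == TL_OPS_NON) {
		return NULL;
	}

	te = transfer_list_find(tl, TL_TAG_FDT);
	if (te == NULL) {
		return NULL;
	}

	return transfer_list_entry_data(te);
}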
diff --git a/tftf/tests/misc_tests/test_invalid_access.c b/tftf/tests/misc_tests/test_invalid_access.c
new file mode 100644
index 000000000..3baeed554
--- /dev/null
+++ b/tftf/tests/misc_tests/test_invalid_access.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "ffa_helpers.h"
+#include <plat/common/platform.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arch_features.h>
+#include <debug.h>
+#ifdef __aarch64__
+#include <spm_test_helpers.h>
+#include <sync.h>
+#endif
+#include <host_realm_helper.h>
+#include <lib/aarch64/arch_features.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_v2.h>
+#include <platform_def.h>
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+
+/*
+ * Using "__aarch64__" here looks weird, but it is unavoidable: this test is
+ * part of the standard tests which run on all platforms, but its prerequisite
+ * (a custom sync exception handler) is only implemented for AArch64.
+ * TODO: Write a framework so that tests kept in the standard list can be
+ * selectively run on a given architecture.
+ */
+#ifdef __aarch64__
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static volatile bool sync_exception_triggered;
+static volatile bool data_abort_triggered;
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
+static __aligned(PAGE_SIZE) uint64_t share_page[PAGE_SIZE / sizeof(uint64_t)];
+
+static bool data_abort_handler(void)
+{
+ uint64_t esr_elx = IS_IN_EL2() ? read_esr_el2() : read_esr_el1();
+ unsigned int rme_supported = get_armv9_2_feat_rme_support();
+
+ sync_exception_triggered = true;
+
+ VERBOSE("%s esr_elx %llx\n", __func__, esr_elx);
+
+ if (EC_BITS(esr_elx) == EC_DABORT_CUR_EL) {
+ if (rme_supported == 0) {
+ /* Synchronous external data abort triggered by trustzone controller */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_EXT_DABORT) {
+ VERBOSE("%s TZC Data Abort caught\n", __func__);
+ data_abort_triggered = true;
+ return true;
+ }
+ } else {
+ /* Synchronous data abort triggered by Granule protection */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_GPF_DABORT) {
+ VERBOSE("%s GPF Data Abort caught\n", __func__);
+ data_abort_triggered = true;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+test_result_t el3_memory_cannot_be_accessed_in_ns(void)
+{
+ const uintptr_t test_address = EL3_MEMORY_ACCESS_ADDR;
+
+ VERBOSE("Attempt to access el3 memory (0x%lx)\n", test_address);
+
+ sync_exception_triggered = false;
+ data_abort_triggered = false;
+
+ int rc = mmap_add_dynamic_region(test_address, test_address, PAGE_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ register_custom_sync_exception_handler(data_abort_handler);
+ *((volatile uint64_t *)test_address);
+ unregister_custom_sync_exception_handler();
+
+ rc = mmap_remove_dynamic_region(test_address, PAGE_SIZE);
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_remove_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (sync_exception_triggered == false) {
+ tftf_testcase_printf("No sync exception while accessing (0x%lx)\n", test_address);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (data_abort_triggered == false) {
+ tftf_testcase_printf("Sync exception is not data abort\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * @Test_Aim@ Check a realm region cannot be accessed from normal world.
+ *
+ * This test delegates a TFTF allocated buffer to Realm. It then attempts
+ * a read access to the region from normal world. This results in the PE
+ * triggering a GPF caught by a custom synchronous abort handler.
+ *
+ */
+test_result_t rl_memory_cannot_be_accessed_in_ns(void)
+{
+ test_result_t result = TEST_RESULT_FAIL;
+ u_register_t retmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ sync_exception_triggered = false;
+ data_abort_triggered = false;
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /* First read access to the test region must not fail. */
+ *((volatile uint64_t *)share_page);
+
+ if ((sync_exception_triggered != false) ||
+ (data_abort_triggered != false)) {
+ goto out_unregister;
+ }
+
+ host_rmi_init_cmp_result();
+
+ /* Delegate the shared page to Realm. */
+ retmm = host_rmi_granule_delegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("%s() failed\n", "host_rmi_granule_delegate");
+ goto out_unregister;
+ }
+
+ /* This access shall trigger a GPF. */
+ *((volatile uint64_t *)share_page);
+
+ if ((sync_exception_triggered != true) ||
+ (data_abort_triggered != true)) {
+ goto out_undelegate;
+ }
+
+ result = host_cmp_result();
+
+out_undelegate:
+ /* Undelegate the shared page. */
+ retmm = host_rmi_granule_undelegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("Granule undelegate failed, ret=0x%lx\n", retmm);
+ }
+
+out_unregister:
+ unregister_custom_sync_exception_handler();
+
+ return result;
+}
+
+/**
+ * @Test_Aim@ Check a secure region cannot be accessed from normal world.
+ *
+ * The following test is intended to run on RME-enabled platforms, where EL3
+ * is in the Root world. On a non-RME platform EL3 is Secure, and access to
+ * secure memory from the NS world is already covered by
+ * el3_memory_cannot_be_accessed_in_ns.
+ */
+test_result_t s_memory_cannot_be_accessed_in_ns(void)
+{
+ const uintptr_t test_address = SECURE_MEMORY_ACCESS_ADDR;
+
+ /* Skip non-RME platforms */
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ VERBOSE("Attempt to access secure memory (0x%lx)\n", test_address);
+
+ data_abort_triggered = false;
+ sync_exception_triggered = false;
+ register_custom_sync_exception_handler(data_abort_handler);
+ dsbsy();
+
+ int rc = mmap_add_dynamic_region(test_address, test_address, PAGE_SIZE,
+ MT_MEMORY | MT_RW | MT_NS);
+
+ if (rc != 0) {
+ tftf_testcase_printf("%d: mmap_add_dynamic_region() = %d\n", __LINE__, rc);
+ return TEST_RESULT_FAIL;
+ }
+
+ *((volatile uint64_t *)test_address);
+
+ mmap_remove_dynamic_region(test_address, PAGE_SIZE);
+
+ dsbsy();
+ unregister_custom_sync_exception_handler();
+
+ if (sync_exception_triggered == false) {
+ tftf_testcase_printf("No sync exception while accessing (0x%lx)\n", test_address);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (data_abort_triggered == false) {
+ tftf_testcase_printf("Sync exception is not data abort\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
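+/*
+ * Helper: delegate a granule for the Realm Descriptor and attempt
+ * RMI_REALM_CREATE with a realm parameters address ('params') that lives in
+ * secure or root memory. The RMM is expected to reject the call with
+ * RMI_ERROR_INPUT.
+ */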
+static test_result_t memory_cannot_be_accessed_in_rl(u_register_t params)
+{
+ u_register_t retrmm;
+ test_result_t result = TEST_RESULT_FAIL;
+ static char rd[GRANULE_SIZE] __aligned(GRANULE_SIZE);
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_granule_delegate",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Creating a realm with a parameters address in a secure physical address space should fail. */
+ retrmm = host_rmi_realm_create((u_register_t)&rd[0], params);
+ if (retrmm == 0UL) {
+ ERROR("Realm create operation should fail, %lx\n", retrmm);
+ retrmm = host_rmi_realm_destroy((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ ERROR("Realm destroy operation returns fail, %lx\n", retrmm);
+ }
+ } else if (retrmm != RMI_ERROR_INPUT) {
+ ERROR("Realm create operation should fail with code:%d retrmm:%ld\n",
+ RMI_ERROR_INPUT, retrmm);
+ } else {
+ result = TEST_RESULT_SUCCESS;
+ }
+
+ retrmm = host_rmi_granule_undelegate((u_register_t)&rd[0]);
+ if (retrmm != 0UL) {
+ INFO("Undelegate operation returns 0x%lx\n", retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (result == TEST_RESULT_SUCCESS) {
+ return host_cmp_result();
+ }
+
+ return TEST_RESULT_FAIL;
+}
+
+/**
+ * @Test_Aim@ Check a root region cannot be accessed from a secure partition.
+ *
+ * A hardcoded address marked as Root in the GPT is shared with a secure
+ * partition. The operation fails because memory shared with an SP must be
+ * preconfigured in the memory ranges described in the SPMC manifest, and the
+ * S/NS ranges that the SP can access must never contain Realm or Root memory,
+ * as that would be a configuration error.
+ * This test validates that the SP cannot get access to Root memory via the
+ * FF-A memory sharing interfaces.
+ */
+test_result_t rt_memory_cannot_be_accessed_in_s(void)
+{
+ const uintptr_t test_address = EL3_MEMORY_ACCESS_ADDR;
+ struct ffa_memory_region_constituent constituents[] = {
+ {
+ (void *)test_address, 1, 0
+ }
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ ffa_memory_handle_t handle;
+ struct mailbox_buffers mb;
+ struct ffa_value ret;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_SHARE_SMC32);
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ FFA_MEM_SHARE_SMC32, &ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ return TEST_RESULT_FAIL;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_rl(void)
+{
+ u_register_t params = (u_register_t)SECURE_MEMORY_ACCESS_ADDR;
+ return memory_cannot_be_accessed_in_rl(params);
+}
+
+test_result_t rt_memory_cannot_be_accessed_in_rl(void)
+{
+ u_register_t params = (u_register_t)EL3_MEMORY_ACCESS_ADDR;
+ return memory_cannot_be_accessed_in_rl(params);
+}
+
+#else
+
+test_result_t el3_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t rl_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_ns(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t s_memory_cannot_be_accessed_in_rl(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t rt_memory_cannot_be_accessed_in_rl(void)
+{
+ tftf_testcase_printf("Test not ported to AArch32\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif /* __aarch64__ */
diff --git a/tftf/tests/misc_tests/test_nop.c b/tftf/tests/misc_tests/test_nop.c
new file mode 100644
index 000000000..d3c4386ab
--- /dev/null
+++ b/tftf/tests/misc_tests/test_nop.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <power_management.h>
+#include <test_helpers.h>
+
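+/*
+ * Export a global 'debug_hook' label in the middle of a NOP sled so that an
+ * external debugger (TFX, see test_nop() below) can place a breakpoint at a
+ * stable address; noinline keeps the label from being duplicated or removed.
+ */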
+__attribute__((noinline))
+static void debug_hook_func(void)
+{
+ __asm__ volatile(
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "debug_hook:\n"
+ ".global debug_hook\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ );
+
+ return;
+}
+
+static test_result_t secondary_cpu(void)
+{
+ debug_hook_func();
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * This is intended for use in conjunction with Trusted Firmware eXplorer
+ * (TFX).
+ *
+ * 1. Power up all secondary CPUs and execute test_nop.
+ * 2. TFX is expected to set a breakpoint on debug_hook. When this is hit,
+ * TFX takes over control and starts injecting test code.
+ * 3. Once the test is complete, TFX powers down all CPUs.
+ */
+test_result_t test_nop(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Start all other CPUs */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid, (uintptr_t)secondary_cpu, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x0x%llx\n", (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Do the actual work */
+ debug_hook_func();
+
+ /* Wait for other CPUs to complete */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) != PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/misc_tests/test_ras_ffh_nested.c b/tftf/tests/misc_tests/test_ras_ffh_nested.c
new file mode 100644
index 000000000..99c71b750
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_ffh_nested.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <psci.h>
+#include <sdei.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t sdei_event_received;
+extern void inject_unrecoverable_ras_error(void);
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
+
+int sdei_handler(int ev, uint64_t arg)
+{
+ sdei_event_received = 1;
+ tftf_testcase_printf("SError SDEI event received.\n");
+ return 0;
+}
+
+/*
+ * Test to verify nested exception handling of SErrors in EL3.
+ *
+ * This test exercises the EL3 path for nested handling of SErrors during SMC
+ * exception handling. In the SMC vector path, while errors are synchronized,
+ * a pending async EA is detected and handled in EL3 (as it is in FFH mode) as
+ * a nested exception. The original SMC call is handled after the async EA.
+ *
+ * This test works in conjunction with "ras_ffh_nested.patch"
+ * present in the CI repository.
+ *
+ * Test steps:
+ * 1. TF-A is built for Firmware First handling (FFH) of RAS errors.
+ * 2. Register/enable SDEI event notification for the RAS error.
+ * 3. Make an SMC call to get the SMCCC version, which is used for comparison
+ *    later on. This call also changes SCR_EL3.EA to 0 to route SErrors to
+ *    TFTF, which allows an SError to be pended when the next SMC call is made.
+ * 4. Disable SError (PSTATE.A = 1).
+ * 5. Inject a RAS error and give it time to trigger.
+ * 6. At this point the SError is pending (ISR_EL1 = 0x100).
+ * 7. Make an SMC call to get the version.
+ * 8. On entering EL3 through the sync exception vector entry, EL3 finds that
+ *    an SError is pending.
+ * 9. Based on the FFH routing model, EL3 calls "handle_pending_async_ea" to
+ *    handle the nested SError exception first.
+ * 10. The RAS error is handled by the platform handler and notified to TFTF
+ *     through the SDEI handler.
+ * 11. Once control returns to the SMC vector entry, EL3 continues with the
+ *     original SMC request.
+ *
+ * Checks:
+ * 1. Ensure that the SDEI notification was received.
+ * 2. Ensure that the second SMC request was successful.
+ *
+ */
+test_result_t test_ras_ffh_nested(void)
+{
+ int64_t ret;
+ const int event_id = 5000;
+ smc_args args;
+ smc_ret_values smc_ret;
+ u_register_t expected_ver;
+
+ /* Register SDEI handler */
+ ret = sdei_event_register(event_id, serror_sdei_event_handler, 0,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(event_id);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Get the version to compare against */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+ expected_ver = smc_ret.ret0;
+ smc_ret.ret0 = 0;
+
+ disable_serror();
+
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+
+ /* Ensure that we are testing reflection path, SMC before SError */
+ if (sdei_event_received == true) {
+ tftf_testcase_printf("SError was triggered before SMC\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ smc_ret = tftf_smc(&args);
+
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if ((int32_t)smc_ret.ret0 != expected_ver) {
+ printf("Unexpected SMCCC version: 0x%x\n", (int)smc_ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (sdei_event_received == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_ffh_nested(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/misc_tests/test_ras_kfh.c b/tftf/tests/misc_tests/test_ras_kfh.c
new file mode 100644
index 000000000..b38d6c42c
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_kfh.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <serror.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t serror_triggered;
+extern void inject_unrecoverable_ras_error(void);
+
+static bool serror_handler(void)
+{
+ serror_triggered = 1;
+ return true;
+}
+
+/*
+ * Test Kernel First handling paradigm of RAS errors.
+ *
+ * Register a custom serror handler in tftf, inject a RAS error and wait
+ * for finite time to ensure that SError triggered and handled.
+ */
+test_result_t test_ras_kfh(void)
+{
+ register_custom_serror_handler(serror_handler);
+ inject_unrecoverable_ras_error();
+
+ /* Give reasonable time for SError to be triggered/handled */
+ waitms(500);
+
+ unregister_custom_serror_handler();
+
+ if (serror_triggered == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_kfh(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+#endif
diff --git a/tftf/tests/misc_tests/test_ras_kfh_reflect.c b/tftf/tests/misc_tests/test_ras_kfh_reflect.c
new file mode 100644
index 000000000..d24fc4792
--- /dev/null
+++ b/tftf/tests/misc_tests/test_ras_kfh_reflect.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <drivers/arm/arm_gic.h>
+#include <irq.h>
+#include <platform.h>
+#include <psci.h>
+#include <serror.h>
+#include <sgi.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+static volatile uint64_t serror_triggered;
+static volatile uint64_t irq_triggered;
+static u_register_t expected_ver;
+extern void inject_unrecoverable_ras_error(void);
+
+/*
+ * Tests to verify reflection of lower EL SErrors in RAS KFH mode.
+ *
+ * These tests exercise the EL3 path that reflects an SError back to the
+ * lower EL, which is triggered as part of error synchronization on EL3
+ * entry. They work in conjunction with "ras_kfh_reflection.patch" present
+ * in the CI repository.
+ *
+ * There is one test each for reflection taken from a sync and an async
+ * exception.
+ *
+ */
+static bool serror_handler(void)
+{
+ serror_triggered = 1;
+ tftf_testcase_printf("SError event received.\n");
+ return true;
+}
+
+static int irq_handler(void *data)
+{
+ irq_triggered = 1;
+ tftf_testcase_printf("IRQ received.\n");
+ return true;
+}
+
+/*
+ * Test Steps:
+ * 1. Register a custom SError handler for TFTF.
+ * 2. Make an SMC call to get the SMCCC version, which is used for comparison
+ *    later on. This call also sets SCR_EL3.I = 1 to route IRQs to EL3.
+ * 3. Disable SError (PSTATE.A = 1).
+ * 4. Inject a RAS error and give it time to trigger.
+ * 5. Register an SGI handler and inject an SGI.
+ * 6. Because the IRQ is targeted at EL3, it traps to the EL3 irq_vector_entry.
+ * 7. On entering EL3, a pending SError is found, so EL3 calls
+ *    "reflect_pending_serror_to_lower_el" and erets.
+ * 8. TF-A erets back from EL3 (without handling the IRQ) and, during ERET,
+ *    changes SCR_EL3.I back to 0 and unmasks SError for TFTF
+ *    (SPSR.PSTATE.A = 0).
+ * 9. On TFTF entry both the IRQ and the SError are pending, so either
+ *    exception may be taken first (based on the SError/IRQ priority). On the
+ *    FVP model on which this was tested, the IRQ is taken first.
+ * 10. The IRQ handler is called first and then the SError handler.
+ *
+ */
+test_result_t test_ras_kfh_reflect_irq(void)
+{
+ smc_args args;
+ unsigned int mpid = read_mpidr_el1();
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ const unsigned int sgi_id = IRQ_NS_SGI_0;
+ smc_ret_values smc_ret;
+ int ret;
+
+ /* Get the SMCCC version to compare against */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+ expected_ver = smc_ret.ret0;
+
+ register_custom_serror_handler(serror_handler);
+ disable_serror();
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ ret = tftf_irq_register_handler(sgi_id, irq_handler);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to register initial IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+ tftf_irq_enable(sgi_id, GIC_HIGHEST_NS_PRIORITY);
+ tftf_send_sgi(sgi_id, core_pos);
+
+ if ((serror_triggered == false) || (irq_triggered == false)) {
+ tftf_testcase_printf("SError or IRQ is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = tftf_irq_unregister_handler(sgi_id);
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to unregister IRQ handler\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ unregister_custom_serror_handler();
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test Steps:
+ * 1. Register a custom SError handler for TFTF.
+ * 2. Disable SError (PSTATE.A = 1).
+ * 3. Inject a RAS error and give it time to trigger.
+ * 4. Ensure the SError has not triggered before making the SMC call.
+ * 5. Make an SMC call; on entering EL3 a pending SError is found, so EL3
+ *    calls "reflect_pending_serror_to_lower_el" and erets.
+ * 6. TF-A erets back from EL3 (without handling the SMC) and, during ERET,
+ *    unmasks SError for TFTF (SPSR.PSTATE.A = 0).
+ * 7. On TFTF entry the pending SError is taken, which causes the registered
+ *    SError handler to be called.
+ * 8. After returning back from EL3, the original SMC request is handled.
+ */
+test_result_t test_ras_kfh_reflect_sync(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+
+ serror_triggered = 0;
+
+ register_custom_serror_handler(serror_handler);
+ disable_serror();
+ inject_unrecoverable_ras_error();
+
+ waitms(50);
+
+ /* Ensure that we are testing reflection path, SMC before SError */
+ if (serror_triggered == true) {
+ tftf_testcase_printf("SError was triggered before SMC\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if ((int32_t)ret.ret0 != expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n", (int)ret.ret0);
+ return TEST_RESULT_FAIL;
+ }
+
+ unregister_custom_serror_handler();
+
+ if (serror_triggered == false) {
+ tftf_testcase_printf("SError is not triggered\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+#else
+test_result_t test_ras_kfh_reflect_irq(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+
+test_result_t test_ras_kfh_reflect_sync(void)
+{
+ tftf_testcase_printf("Not supported on AArch32.\n");
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/misc_tests/test_single_fault.c b/tftf/tests/misc_tests/test_single_fault.c
index f55d8de8d..cacd0a766 100644
--- a/tftf/tests/misc_tests/test_single_fault.c
+++ b/tftf/tests/misc_tests/test_single_fault.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -11,20 +11,18 @@
#ifdef __aarch64__
-uint64_t serror_received;
-
-extern void inject_serror(void);
+static volatile uint64_t sdei_event_received;
+extern void inject_unrecoverable_ras_error(void);
+extern int serror_sdei_event_handler(int ev, uint64_t arg);
-int serror_handler(int ev, uint64_t arg)
+int sdei_handler(int ev, uint64_t arg)
{
- serror_received = 1;
+ sdei_event_received = 1;
tftf_testcase_printf("SError SDEI event received.\n");
return 0;
}
-extern int serror_sdei_event_handler(int ev, uint64_t arg);
-
test_result_t test_single_fault(void)
{
int64_t ret;
@@ -51,7 +49,12 @@ test_result_t test_single_fault(void)
return TEST_RESULT_FAIL;
}
- inject_serror();
+ inject_unrecoverable_ras_error();
+
+ /* Wait until the SError fires */
+ do {
+ dmbish();
+ } while (sdei_event_received == 0);
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/misc_tests/test_uncontainable.c b/tftf/tests/misc_tests/test_uncontainable.c
index 231e5e8cf..5250f0a33 100644
--- a/tftf/tests/misc_tests/test_uncontainable.c
+++ b/tftf/tests/misc_tests/test_uncontainable.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,11 +8,11 @@
#ifdef __aarch64__
-extern void inject_uncontainable(void);
+extern void inject_uncontainable_ras_error(void);
test_result_t test_uncontainable(void)
{
- inject_uncontainable();
+ inject_uncontainable_ras_error();
return TEST_RESULT_SUCCESS;
}
diff --git a/tftf/tests/misc_tests/test_undef_injection.c b/tftf/tests/misc_tests/test_undef_injection.c
new file mode 100644
index 000000000..2d925a213
--- /dev/null
+++ b/tftf/tests/misc_tests/test_undef_injection.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <assert.h>
+#include <debug.h>
+#include <smccc.h>
+#include <sync.h>
+#include <tftf_lib.h>
+#include <platform_def.h>
+
+static volatile bool undef_injection_triggered;
+
+static bool undef_injection_handler(void)
+{
+ uint64_t esr_el2 = read_esr_el2();
+ if (EC_BITS(esr_el2) == EC_UNKNOWN) {
+ VERBOSE("UNDEF injection from EL3\n");
+ undef_injection_triggered = true;
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Test to verify UNDEF injection support in TF-A
+ *
+ * This test accesses an FGT EL2 register, which traps to EL3; the UNDEF is
+ * then injected back from EL3 into TFTF. This verifies that the injection
+ * logic in TF-A works and that EL3 remains functional after the injection.
+ *
+ * To get the FGT register access to trap to EL3, this test is run on a model
+ * with FEAT_FGT present while TF-A is built with ENABLE_FEAT_FGT = 0, so the
+ * EL3 traps are left enabled.
+ */
+test_result_t test_undef_injection(void)
+{
+ undef_injection_triggered = false;
+
+ register_custom_sync_exception_handler(undef_injection_handler);
+
+ /* Try to access a register which traps to EL3 */
+ read_hfgitr_el2();
+
+ unregister_custom_sync_exception_handler();
+
+ /* Ensure that EL3 still functional */
+ smc_args args;
+ smc_ret_values smc_ret;
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ smc_ret = tftf_smc(&args);
+
+ tftf_testcase_printf("SMCCC Version = %d.%d\n",
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MAJOR_SHIFT) & SMCCC_VERSION_MAJOR_MASK),
+ (int)((smc_ret.ret0 >> SMCCC_VERSION_MINOR_SHIFT) & SMCCC_VERSION_MINOR_MASK));
+
+ if (undef_injection_triggered == false) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/performance_tests/test_psci_latencies.c b/tftf/tests/performance_tests/test_psci_latencies.c
index 8a7a1d032..b20fe887b 100644
--- a/tftf/tests/performance_tests/test_psci_latencies.c
+++ b/tftf/tests/performance_tests/test_psci_latencies.c
@@ -34,27 +34,6 @@ static event_t target_booted, target_keep_on_booted, target_keep_on;
*/
#define BASELINE_VARIANCE 10
-/*
- * Utility function to wait for all CPUs other than the caller to be
- * OFF.
- */
-static void wait_for_non_lead_cpus(void)
-{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int target_mpid, target_node;
-
- for_each_cpu(target_node) {
- target_mpid = tftf_get_mpidr_from_node(target_node);
- /* Skip lead CPU, as it is powered on */
- if (target_mpid == lead_mpid)
- continue;
-
- while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0)
- != PSCI_STATE_OFF)
- ;
- }
-}
-
static test_result_t test_target_function(void)
{
tftf_send_event(&target_booted);
diff --git a/tftf/tests/plat/xilinx/common/plat_pm.c b/tftf/tests/plat/xilinx/common/plat_pm.c
new file mode 100644
index 000000000..7f43824fe
--- /dev/null
+++ b/tftf/tests/plat/xilinx/common/plat_pm.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <smccc.h>
+#include <tftf_lib.h>
+
+#include <platform_def.h>
+
+/* Number of 32bits values in payload */
+#define PAYLOAD_ARG_CNT 4U
+
+#define upper_32_bits(n) ((uint32_t)(((n) >> 32)))
+#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
+
+
+#define PM_GET_API_VERSION 0xC2000001
+#define PM_GET_CHIPID 0xC2000018
+
+
+/*
+ * @Test_Aim@ Test to read the PM-API version from AMD-Xilinx platform
+ * This test runs on the lead CPU and issues the PM_GET_API_VERSION SMC call to
+ * read the PM-API version supported on the platform.
+ * Return values are packed as:
+ * ret.ret0(31:0) : actual return value
+ * ret.ret0(63:32) : Return arg1
+ * ret.ret1(31:0) : Return arg2
+ * ret.ret1(63:32) : Return arg3 and so on.
+ */
+test_result_t test_pmapi_version(void)
+{
+ smc_args args = { PM_GET_API_VERSION };
+ smc_ret_values ret;
+ uint32_t major, minor, status;
+
+ ret = tftf_smc(&args);
+ status = lower_32_bits(ret.ret0);
+ if (status) {
+ tftf_testcase_printf("%s ERROR Reading PM-API Version\n",
+ __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ major = upper_32_bits(ret.ret0) >> 16;
+ minor = upper_32_bits(ret.ret0) & 0xFFFF;
+
+ tftf_testcase_printf("%s PM-API Version : %d.%d\n", __func__,
+ major, minor);
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test to read the Chip ID of AMD-Xilinx platforms.
+ * This test runs on the lead CPU and issues the PM_GET_CHIPID SMC call to read
+ * the chip ID. The idcode and version are printed.
+ * Return values are packed as:
+ * ret.ret0(31:0) : actual return value
+ * ret.ret0(63:32) : Return arg1
+ * ret.ret1(31:0) : Return arg2
+ * ret.ret1(63:32) : Return arg3 and so on.
+ */
+test_result_t test_get_chipid(void)
+{
+ smc_args args = { PM_GET_CHIPID };
+ smc_ret_values ret;
+ uint32_t idcode, version, status;
+
+ ret = tftf_smc(&args);
+ status = lower_32_bits(ret.ret0);
+ if (status) {
+ tftf_testcase_printf("%s ERROR Reading Chip ID\n", __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ idcode = upper_32_bits(ret.ret0);
+ version = lower_32_bits(ret.ret1);
+
+ tftf_testcase_printf("%s Idcode = 0x%x Version = 0x%x\n", __func__,
+ idcode, version);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c
new file mode 100644
index 000000000..ebf40a531
--- /dev/null
+++ b/tftf/tests/runtime_services/arm_arch_svc/smccc_arch_workaround_3.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <arm_arch_svc.h>
+#include <debug.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <string.h>
+#include <tftf_lib.h>
+
+#ifdef __aarch64__
+#define CORTEX_A57_MIDR 0x410FD070
+#define CORTEX_A72_MIDR 0x410FD080
+#define CORTEX_A73_MIDR 0x410FD090
+#define CORTEX_A75_MIDR 0x410FD0A0
+
+static int cortex_a57_test(void);
+static int cortex_a73_test(void);
+static int cortex_a75_test(void);
+static int csv2_test(void);
+
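+/*
+ * Expected-result table: maps a CPU MIDR to a function returning whether
+ * SMCCC_ARCH_WORKAROUND_3 is expected to be required on that core. Cortex-A72
+ * needs the workaround only when CSV2 is not implemented (see csv2_test()).
+ */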
+static struct ent {
+ unsigned int midr;
+ int (*wa_required)(void);
+} entries[] = {
+ { .midr = CORTEX_A57_MIDR, .wa_required = cortex_a57_test },
+ { .midr = CORTEX_A72_MIDR, .wa_required = csv2_test },
+ { .midr = CORTEX_A73_MIDR, .wa_required = cortex_a73_test },
+ { .midr = CORTEX_A75_MIDR, .wa_required = cortex_a75_test }
+};
+
+static int cortex_a57_test(void)
+{
+ return 1;
+}
+
+static int cortex_a73_test(void)
+{
+ return 1;
+}
+
+static int cortex_a75_test(void)
+{
+ return 1;
+}
+
+static int csv2_test(void)
+{
+ uint64_t pfr0;
+
+ pfr0 = read_id_aa64pfr0_el1() >> ID_AA64PFR0_CSV2_SHIFT;
+ if ((pfr0 & ID_AA64PFR0_CSV2_MASK) == 1) {
+ return 0;
+ }
+ return 1;
+}
+
+static test_result_t test_smccc_entrypoint(void)
+{
+ smc_args args;
+ smc_ret_values ret;
+ int32_t expected_ver;
+ unsigned int my_midr, midr_mask;
+ int wa_required;
+ size_t i;
+
+ /* Check if SMCCC version is at least v1.1 */
+ expected_ver = MAKE_SMCCC_VERSION(1, 1);
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_VERSION;
+ ret = tftf_smc(&args);
+ if ((int32_t)ret.ret0 < expected_ver) {
+ tftf_testcase_printf("Unexpected SMCCC version: 0x%x\n",
+ (int)ret.ret0);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* Check if SMCCC_ARCH_WORKAROUND_3 is required or not */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_ARCH_FEATURES;
+ args.arg1 = SMCCC_ARCH_WORKAROUND_3;
+ ret = tftf_smc(&args);
+ if ((int)ret.ret0 == -1) {
+ tftf_testcase_printf("SMCCC_ARCH_WORKAROUND_3 is not implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* If the call returns 0, it means the workaround is required */
+ if ((int)ret.ret0 == 0) {
+ wa_required = 1;
+ } else {
+ wa_required = 0;
+ }
+
+ /* Check if the SMC return value matches our expectations */
+ my_midr = (unsigned int)read_midr_el1();
+ midr_mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | (MIDR_PN_MASK << MIDR_PN_SHIFT);
+ for (i = 0; i < ARRAY_SIZE(entries); i++) {
+ struct ent *entp = &entries[i];
+
+ if ((my_midr & midr_mask) == (entp->midr & midr_mask)) {
+ if (entp->wa_required() != wa_required) {
+ return TEST_RESULT_FAIL;
+ }
+ break;
+ }
+ }
+ if ((i == ARRAY_SIZE(entries)) && wa_required) {
+ tftf_testcase_printf("TFTF workaround table out of sync with TF-A\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Invoke the workaround to make sure nothing nasty happens */
+ memset(&args, 0, sizeof(args));
+ args.fid = SMCCC_ARCH_WORKAROUND_3;
+ tftf_smc(&args);
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_smccc_arch_workaround_3(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node, ret;
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ /* Power on all the non-lead cores. */
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node);
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)test_smccc_entrypoint, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+ /*
+ * Wait for test_smccc_entrypoint to return
+ * and the CPU to power down
+ */
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return test_smccc_entrypoint();
+}
+#else
+test_result_t test_smccc_arch_workaround_3(void)
+{
+ INFO("%s skipped on AArch32\n", __func__);
+ return TEST_RESULT_SKIPPED;
+}
+#endif
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c b/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c
new file mode 100644
index 000000000..ceca36de4
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_pmuv3.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_pmu.h>
+#include <platform.h>
+
+#define MAX_COUNTERS 31
+
+/* PMCCFILTR_EL0 mask */
+#define PMCCFILTR_EL0_MASK ( \
+ PMCCFILTR_EL0_P_BIT | \
+ PMCCFILTR_EL0_U_BIT | \
+ PMCCFILTR_EL0_NSK_BIT | \
+ PMCCFILTR_EL0_NSH_BIT | \
+ PMCCFILTR_EL0_M_BIT | \
+ PMCCFILTR_EL0_RLK_BIT | \
+ PMCCFILTR_EL0_RLU_BIT | \
+ PMCCFILTR_EL0_RLH_BIT)
+
+/* PMEVTYPER<n>_EL0 mask */
+#define PMEVTYPER_EL0_MASK ( \
+ PMEVTYPER_EL0_P_BIT | \
+ PMEVTYPER_EL0_U_BIT | \
+ PMEVTYPER_EL0_NSK_BIT | \
+ PMEVTYPER_EL0_NSU_BIT | \
+ PMEVTYPER_EL0_NSH_BIT | \
+ PMEVTYPER_EL0_M_BIT | \
+ PMEVTYPER_EL0_RLK_BIT | \
+ PMEVTYPER_EL0_RLU_BIT | \
+ PMEVTYPER_EL0_RLH_BIT | \
+ PMEVTYPER_EL0_EVTCOUNT_BITS)
+
+/* PMSELR_EL0 mask */
+#define PMSELR_EL0_MASK 0x1F
+
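+/*
+ * The WRITE_PMEV_REGS()/CHECK_PMEV_REGS() macros expand to switch-case labels
+ * that deliberately fall through, so that 'switch (--num_cnts)' programs or
+ * checks event counters (num_cnts - 1) down to 0 in a single pass.
+ */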
+#define WRITE_PMEV_REGS(n) { \
+ case n: \
+ pmu_ptr->pmevcntr_el0[n] = rand64(); \
+ write_pmevcntrn_el0(n, pmu_ptr->pmevcntr_el0[n]); \
+ pmu_ptr->pmevtyper_el0[n] = rand() & PMEVTYPER_EL0_MASK;\
+ write_pmevtypern_el0(n, pmu_ptr->pmevtyper_el0[n]); \
+}
+
+#define CHECK_PMEV_REG(n, reg) { \
+ read_val = read_##reg##n_el0(n); \
+ if (read_val != pmu_ptr->reg##_el0[n]) { \
+ ERROR("Corrupted "#reg"%d_el0=0x%lx (0x%lx)\n", \
+ n, read_val, pmu_ptr->reg##_el0[n]); \
+ return false; \
+ } \
+}
+
+#define CHECK_PMEV_REGS(n) { \
+ case n: \
+ CHECK_PMEV_REG(n, pmevcntr); \
+ CHECK_PMEV_REG(n, pmevtyper); \
+}
+
+#define WRITE_PMREG(reg, mask) { \
+ pmu_ptr->reg = rand64() & mask; \
+ write_##reg(pmu_ptr->reg); \
+}
+
+#define CHECK_PMREG(reg) { \
+ read_val = read_##reg(); \
+ val = pmu_ptr->reg; \
+ if (read_val != val) { \
+ ERROR("Corrupted "#reg"=0x%lx (0x%lx)\n", \
+ read_val, val); \
+ return false; \
+ } \
+}
+
+struct pmu_registers {
+ unsigned long pmcr_el0;
+ unsigned long pmcntenset_el0;
+ unsigned long pmovsset_el0;
+ unsigned long pmintenset_el1;
+ unsigned long pmccntr_el0;
+ unsigned long pmccfiltr_el0;
+ unsigned long pmuserenr_el0;
+
+ unsigned long pmevcntr_el0[MAX_COUNTERS];
+ unsigned long pmevtyper_el0[MAX_COUNTERS];
+
+ unsigned long pmselr_el0;
+ unsigned long pmxevcntr_el0;
+ unsigned long pmxevtyper_el0;
+
+} __aligned(CACHE_WRITEBACK_GRANULE);
+
+static struct pmu_registers pmu_state[PLATFORM_CORE_COUNT];
+
+void host_set_pmu_state(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ struct pmu_registers *pmu_ptr = &pmu_state[core_pos];
+ unsigned int num_cnts = GET_PMU_CNT;
+ unsigned long val;
+
+ val = read_pmcr_el0() | PMCR_EL0_DP_BIT;
+ pmu_ptr->pmcr_el0 = val;
+
+ /* Disable cycle counting and reset all counters */
+ write_pmcr_el0(val | PMCR_EL0_C_BIT | PMCR_EL0_P_BIT);
+
+ /* Disable all counters */
+ pmu_ptr->pmcntenset_el0 = 0UL;
+ write_pmcntenclr_el0(PMU_CLEAR_ALL);
+
+ /* Clear overflow status */
+ pmu_ptr->pmovsset_el0 = 0UL;
+ write_pmovsclr_el0(PMU_CLEAR_ALL);
+
+ /* Disable overflow interrupts on all counters */
+ pmu_ptr->pmintenset_el1 = 0UL;
+ write_pmintenclr_el1(PMU_CLEAR_ALL);
+
+ WRITE_PMREG(pmccntr_el0, UINT64_MAX);
+ WRITE_PMREG(pmccfiltr_el0, PMCCFILTR_EL0_MASK);
+
+ pmu_ptr->pmuserenr_el0 = read_pmuserenr_el0();
+
+ if (num_cnts != 0U) {
+ switch (--num_cnts) {
+ WRITE_PMEV_REGS(30);
+ WRITE_PMEV_REGS(29);
+ WRITE_PMEV_REGS(28);
+ WRITE_PMEV_REGS(27);
+ WRITE_PMEV_REGS(26);
+ WRITE_PMEV_REGS(25);
+ WRITE_PMEV_REGS(24);
+ WRITE_PMEV_REGS(23);
+ WRITE_PMEV_REGS(22);
+ WRITE_PMEV_REGS(21);
+ WRITE_PMEV_REGS(20);
+ WRITE_PMEV_REGS(19);
+ WRITE_PMEV_REGS(18);
+ WRITE_PMEV_REGS(17);
+ WRITE_PMEV_REGS(16);
+ WRITE_PMEV_REGS(15);
+ WRITE_PMEV_REGS(14);
+ WRITE_PMEV_REGS(13);
+ WRITE_PMEV_REGS(12);
+ WRITE_PMEV_REGS(11);
+ WRITE_PMEV_REGS(10);
+ WRITE_PMEV_REGS(9);
+ WRITE_PMEV_REGS(8);
+ WRITE_PMEV_REGS(7);
+ WRITE_PMEV_REGS(6);
+ WRITE_PMEV_REGS(5);
+ WRITE_PMEV_REGS(4);
+ WRITE_PMEV_REGS(3);
+ WRITE_PMEV_REGS(2);
+ WRITE_PMEV_REGS(1);
+ default:
+ WRITE_PMEV_REGS(0);
+ }
+
+ /* Restore num_cnts and pick a random counter index in [0, num_cnts - 1] */
+ val = rand() % ++num_cnts;
+ } else {
+ val = 0UL;
+ }
+
+ pmu_ptr->pmselr_el0 = val;
+ write_pmselr_el0(val);
+
+ pmu_ptr->pmxevcntr_el0 = read_pmxevcntr_el0();
+ pmu_ptr->pmxevtyper_el0 = read_pmxevtyper_el0();
+}
+
+bool host_check_pmu_state(void)
+{
+ unsigned int core_pos = platform_get_core_pos(read_mpidr_el1());
+ struct pmu_registers *pmu_ptr = &pmu_state[core_pos];
+ unsigned int num_cnts = GET_PMU_CNT;
+ unsigned long val, read_val;
+
+ CHECK_PMREG(pmcr_el0);
+ CHECK_PMREG(pmcntenset_el0);
+ CHECK_PMREG(pmovsset_el0);
+ CHECK_PMREG(pmintenset_el1);
+ CHECK_PMREG(pmccntr_el0);
+ CHECK_PMREG(pmccfiltr_el0);
+ CHECK_PMREG(pmuserenr_el0);
+ CHECK_PMREG(pmselr_el0);
+ CHECK_PMREG(pmxevcntr_el0);
+ CHECK_PMREG(pmxevtyper_el0);
+
+ if (num_cnts != 0UL) {
+ switch (--num_cnts) {
+ CHECK_PMEV_REGS(30);
+ CHECK_PMEV_REGS(29);
+ CHECK_PMEV_REGS(28);
+ CHECK_PMEV_REGS(27);
+ CHECK_PMEV_REGS(26);
+ CHECK_PMEV_REGS(25);
+ CHECK_PMEV_REGS(24);
+ CHECK_PMEV_REGS(23);
+ CHECK_PMEV_REGS(22);
+ CHECK_PMEV_REGS(21);
+ CHECK_PMEV_REGS(20);
+ CHECK_PMEV_REGS(19);
+ CHECK_PMEV_REGS(18);
+ CHECK_PMEV_REGS(17);
+ CHECK_PMEV_REGS(16);
+ CHECK_PMEV_REGS(15);
+ CHECK_PMEV_REGS(14);
+ CHECK_PMEV_REGS(13);
+ CHECK_PMEV_REGS(12);
+ CHECK_PMEV_REGS(11);
+ CHECK_PMEV_REGS(10);
+ CHECK_PMEV_REGS(9);
+ CHECK_PMEV_REGS(8);
+ CHECK_PMEV_REGS(7);
+ CHECK_PMEV_REGS(6);
+ CHECK_PMEV_REGS(5);
+ CHECK_PMEV_REGS(4);
+ CHECK_PMEV_REGS(3);
+ CHECK_PMEV_REGS(2);
+ CHECK_PMEV_REGS(1);
+ default:
+ CHECK_PMEV_REGS(0);
+ }
+ }
+
+ return true;
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c b/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c
new file mode 100644
index 000000000..2c5891938
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_realm_helper.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <stdint.h>
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <heap/page_alloc.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+#include <platform.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <realm_def.h>
+#include <sgi.h>
+#include <test_helpers.h>
+#include <xlat_tables_v2.h>
+
+#define RMI_EXIT(id) \
+ [RMI_EXIT_##id] = #id
+
+const char *rmi_exit[] = {
+ RMI_EXIT(SYNC),
+ RMI_EXIT(IRQ),
+ RMI_EXIT(FIQ),
+ RMI_EXIT(FIQ),
+ RMI_EXIT(PSCI),
+ RMI_EXIT(RIPAS_CHANGE),
+ RMI_EXIT(HOST_CALL),
+ RMI_EXIT(SERROR)
+};
+
+/*
+ * Function handler to print the Realm's log buffer,
+ * executed by the secondary core.
+ */
+void realm_print_handler(struct realm *realm_ptr, unsigned int rec_num)
+{
+ size_t str_len = 0UL;
+ host_shared_data_t *host_shared_data;
+ char *log_buffer;
+
+ assert(realm_ptr != NULL);
+ host_shared_data = host_get_shared_structure(realm_ptr, rec_num);
+ log_buffer = (char *)host_shared_data->log_buffer;
+ str_len = strlen((const char *)log_buffer);
+
+ /*
+ * Read Realm message from shared printf location and print
+ * them using UART
+ */
+ if (str_len != 0UL) {
+ /* Avoid memory overflow */
+ log_buffer[MAX_BUF_SIZE - 1] = 0U;
+ mp_printf("[VMID %u][Rec %u]: %s", realm_ptr->vmid, rec_num, log_buffer);
+ (void)memset((char *)log_buffer, 0, MAX_BUF_SIZE);
+ }
+}
+
+/*
+ * Initialisation function which clears the shared region
+ * used for Realm message logging.
+ */
+static void host_init_realm_print_buffer(struct realm *realm_ptr)
+{
+ host_shared_data_t *host_shared_data;
+
+ for (unsigned int i = 0U; i < realm_ptr->rec_count; i++) {
+ host_shared_data = host_get_shared_structure(realm_ptr, i);
+ (void)memset((char *)host_shared_data, 0, sizeof(host_shared_data_t));
+ }
+}
+
+static bool host_enter_realm(struct realm *realm_ptr,
+ u_register_t *exit_reason,
+ unsigned int *host_call_result,
+ unsigned int rec_num)
+{
+ u_register_t ret;
+
+ if (!realm_ptr->payload_created) {
+ ERROR("%s() failed\n", "payload_created");
+ return false;
+ }
+ if (!realm_ptr->shared_mem_created) {
+ ERROR("%s() failed\n", "shared_mem_created");
+ return false;
+ }
+
+ /* Enter Realm */
+ ret = host_realm_rec_enter(realm_ptr, exit_reason, host_call_result, rec_num);
+ if (ret != REALM_SUCCESS) {
+ ERROR("%s() failed, ret=%lx\n", "host_realm_rec_enter", ret);
+ return false;
+ }
+
+ return true;
+}
+
+bool host_prepare_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count)
+{
+ int8_t value;
+
+ if (realm_payload_adr == TFTF_BASE) {
+ ERROR("realm_payload_adr should be grater then TFTF_BASE\n");
+ return false;
+ }
+
+ if (plat_mem_pool_adr == 0UL ||
+ realm_pages_size == 0UL) {
+ ERROR("plat_mem_pool_size or "
+ "realm_pages_size is NULL\n");
+ return false;
+ }
+
+ if (plat_mem_pool_adr < PAGE_POOL_BASE ||
+ plat_mem_pool_adr + realm_pages_size > NS_REALM_SHARED_MEM_BASE) {
+ ERROR("Invalid pool range\n");
+ return false;
+ }
+
+ INFO("Realm start adr=0x%lx\n", plat_mem_pool_adr);
+
+ /* Initialize Host NS heap memory to be used in Realm creation. */
+ if (page_pool_init(plat_mem_pool_adr, realm_pages_size)
+ != HEAP_INIT_SUCCESS) {
+ ERROR("%s() failed\n", "page_pool_init");
+ return false;
+ }
+ memset((char *)realm_ptr, 0U, sizeof(struct realm));
+
+ /* Read Realm Feature Reg 0 */
+ if (host_rmi_features(0UL, &realm_ptr->rmm_feat_reg0) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_rmi_features");
+ return false;
+ }
+
+ /* Fail if IPA bits > implemented size */
+ if (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, feature_flag) >
+ EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, realm_ptr->rmm_feat_reg0)) {
+ ERROR("%s() failed\n", "Invalid s2sz");
+ return false;
+ }
+
+ /*
+ * Overwrite s2sz in the feature register if the host passed a value;
+ * if the host passes the default 0, use the default from RMI_FEATURES.
+ */
+ if (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, feature_flag) != 0U) {
+ realm_ptr->rmm_feat_reg0 &= ~MASK(RMI_FEATURE_REGISTER_0_S2SZ);
+ realm_ptr->rmm_feat_reg0 |= INPLACE(RMI_FEATURE_REGISTER_0_S2SZ,
+ EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, feature_flag));
+ }
+
+ /*
+ * At the moment, TFTF does not have support for FEAT_LPA2, so if
+ * S2SZ is larger than 48 bits, truncate it to ensure we don't surpass
+ * the maximum IPA size for a realm with no LPA2 support.
+ */
+ if (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, realm_ptr->rmm_feat_reg0) > 48U) {
+ realm_ptr->rmm_feat_reg0 &=
+ ~MASK(RMI_FEATURE_REGISTER_0_S2SZ);
+ realm_ptr->rmm_feat_reg0 |=
+ INPLACE(RMI_FEATURE_REGISTER_0_S2SZ, 48U);
+ }
+
+ /* Disable PMU if not required */
+ if ((feature_flag & RMI_FEATURE_REGISTER_0_PMU_EN) == 0UL) {
+ realm_ptr->rmm_feat_reg0 &= ~RMI_FEATURE_REGISTER_0_PMU_EN;
+ realm_ptr->pmu_num_ctrs = 0U;
+ } else {
+ value = EXTRACT(FEATURE_PMU_NUM_CTRS, feature_flag);
+ if (value != -1) {
+ realm_ptr->pmu_num_ctrs = (unsigned int)value;
+ } else {
+ realm_ptr->pmu_num_ctrs =
+ EXTRACT(RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS,
+ realm_ptr->rmm_feat_reg0);
+ }
+ }
+
+ /* Disable SVE if not required */
+ if ((feature_flag & RMI_FEATURE_REGISTER_0_SVE_EN) == 0UL) {
+ realm_ptr->rmm_feat_reg0 &= ~RMI_FEATURE_REGISTER_0_SVE_EN;
+ realm_ptr->sve_vl = 0U;
+ } else {
+ realm_ptr->sve_vl = EXTRACT(FEATURE_SVE_VL, feature_flag);
+ }
+
+ /* Requested number of breakpoints */
+ value = EXTRACT(FEATURE_NUM_BPS, feature_flag);
+ if (value != -1) {
+ realm_ptr->num_bps = (unsigned int)value;
+ } else {
+ realm_ptr->num_bps = EXTRACT(RMI_FEATURE_REGISTER_0_NUM_BPS,
+ realm_ptr->rmm_feat_reg0);
+ }
+
+ /* Requested number of watchpoints */
+ value = EXTRACT(FEATURE_NUM_WPS, feature_flag);
+ if (value != -1) {
+ realm_ptr->num_wps = (unsigned int)value;
+ } else {
+ realm_ptr->num_wps = EXTRACT(RMI_FEATURE_REGISTER_0_NUM_WPS,
+ realm_ptr->rmm_feat_reg0);
+ }
+
+ /* Set SVE bits from feature_flag */
+ realm_ptr->rmm_feat_reg0 &= ~(RMI_FEATURE_REGISTER_0_SVE_EN |
+ MASK(RMI_FEATURE_REGISTER_0_SVE_VL));
+ if ((feature_flag & RMI_FEATURE_REGISTER_0_SVE_EN) != 0UL) {
+ realm_ptr->rmm_feat_reg0 |= RMI_FEATURE_REGISTER_0_SVE_EN |
+ INPLACE(RMI_FEATURE_REGISTER_0_SVE_VL,
+ EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL,
+ feature_flag));
+ }
+
+ if (realm_ptr->rec_count > MAX_REC_COUNT) {
+ ERROR("Invalid Rec Count\n");
+ return false;
+ }
+ realm_ptr->rec_count = rec_count;
+ for (unsigned int i = 0U; i < rec_count; i++) {
+ if (rec_flag[i] == RMI_RUNNABLE ||
+ rec_flag[i] == RMI_NOT_RUNNABLE) {
+ realm_ptr->rec_flag[i] = rec_flag[i];
+ } else {
+ ERROR("Invalid Rec Flag\n");
+ return false;
+ }
+ }
+
+ /* Create Realm */
+ if (host_realm_create(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_create");
+ return false;
+ }
+
+ /* RTT map Realm image */
+ if (host_realm_map_payload_image(realm_ptr, realm_payload_adr) !=
+ REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_map_payload_image");
+ goto destroy_realm;
+ }
+
+ realm_ptr->payload_created = true;
+
+ return true;
+
+ /* Free test resources */
+destroy_realm:
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ }
+ realm_ptr->payload_created = false;
+
+ return false;
+}
+
+bool host_create_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count)
+{
+ bool ret;
+
+ ret = host_prepare_realm_payload(realm_ptr,
+ realm_payload_adr,
+ plat_mem_pool_adr,
+ realm_pages_size,
+ feature_flag,
+ rec_flag,
+ rec_count);
+ if (!ret) {
+ goto destroy_realm;
+ } else {
+ /* Create REC */
+ if (host_realm_rec_create(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_rec_create");
+ goto destroy_realm;
+ }
+
+ if (host_realm_init_ipa_state(realm_ptr, 0U, 0U, 1ULL << 32)
+ != RMI_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_init_ipa_state");
+ goto destroy_realm;
+ }
+ }
+ return true;
+
+destroy_realm:
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ }
+ realm_ptr->payload_created = false;
+ return false;
+}
+
+bool host_create_activate_realm_payload(struct realm *realm_ptr,
+ u_register_t realm_payload_adr,
+ u_register_t plat_mem_pool_adr,
+ u_register_t realm_pages_size,
+ u_register_t feature_flag,
+ const u_register_t *rec_flag,
+ unsigned int rec_count)
+
+{
+ bool ret;
+
+ ret = host_create_realm_payload(realm_ptr,
+ realm_payload_adr,
+ plat_mem_pool_adr,
+ realm_pages_size,
+ feature_flag,
+ rec_flag,
+ rec_count);
+ if (!ret) {
+ goto destroy_realm;
+ } else {
+ /* Activate Realm */
+ if (host_realm_activate(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+ }
+ return true;
+
+destroy_realm:
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ }
+ realm_ptr->payload_created = false;
+ return false;
+}
+
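+/*
+ * Map the NS shared memory region into the Realm as unprotected memory and
+ * initialise the per-REC shared data (command and log buffers).
+ */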
+bool host_create_shared_mem(struct realm *realm_ptr,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size)
+{
+ if (ns_shared_mem_adr < NS_REALM_SHARED_MEM_BASE ||
+ ns_shared_mem_adr + ns_shared_mem_size > PAGE_POOL_END) {
+ ERROR("%s() Invalid adr range\n", "host_realm_map_ns_shared");
+ return false;
+ }
+
+ /* RTT map NS shared region */
+ if (host_realm_map_ns_shared(realm_ptr, ns_shared_mem_adr,
+ ns_shared_mem_size) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_map_ns_shared");
+ realm_ptr->shared_mem_created = false;
+ return false;
+ }
+
+ memset((void *)ns_shared_mem_adr, 0, (size_t)ns_shared_mem_size);
+ realm_ptr->host_shared_data = ns_shared_mem_adr;
+ realm_ptr->shared_mem_created = true;
+ host_init_realm_print_buffer(realm_ptr);
+
+ return true;
+}
+
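+/*
+ * Tear down a Realm created via host_create_*_realm_payload(): reset the NS
+ * page pool, destroy the Realm and clear the tracking structure.
+ */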
+bool host_destroy_realm(struct realm *realm_ptr)
+{
+ /* Free test resources */
+ page_pool_reset();
+
+ if (!realm_ptr->payload_created) {
+ ERROR("%s() failed\n", "payload_created");
+ return false;
+ }
+
+ realm_ptr->payload_created = false;
+ if (host_realm_destroy(realm_ptr) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_destroy");
+ return false;
+ }
+ memset((char *)realm_ptr, 0U, sizeof(struct realm));
+
+ return true;
+}
+
+/*
+ * Enter Realm and run command passed in 'cmd' and compare the exit reason with
+ * 'test_exit_reason'.
+ *
+ * Returns:
+ * true: On success. 'test_exit_reason' matches Realm exit reason. For
+ * RMI_EXIT_HOST_CALL exit reason, the 'host_call_result' is
+ * TEST_RESULT_SUCCESS.
+ * false: On error.
+ */
+bool host_enter_realm_execute(struct realm *realm_ptr,
+ uint8_t cmd,
+ int test_exit_reason,
+ unsigned int rec_num)
+{
+ u_register_t realm_exit_reason = RMI_EXIT_INVALID;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+
+ if (realm_ptr == NULL || realm_ptr->payload_created == false) {
+ return false;
+ }
+
+ if (test_exit_reason >= RMI_EXIT_INVALID) {
+ ERROR("Invalid RmiRecExitReason\n");
+ return false;
+ }
+
+ if (rec_num >= realm_ptr->rec_count) {
+ ERROR("Invalid Rec Count\n");
+ return false;
+ }
+ host_shared_data_set_realm_cmd(realm_ptr, cmd, rec_num);
+ if (!host_enter_realm(realm_ptr, &realm_exit_reason, &host_call_result, rec_num)) {
+ return false;
+ }
+
+
+ if (realm_exit_reason != RMI_EXIT_HOST_CALL) {
+ return true;
+ } else if (host_call_result == TEST_RESULT_SUCCESS) {
+ return true;
+ }
+ }
+
+ if (realm_exit_reason < RMI_EXIT_INVALID) {
+ if ((realm_exit_reason == RMI_EXIT_HOST_CALL) &&
+ (test_exit_reason == realm_exit_reason)) {
+ ERROR("%s(%u) RMI_EXIT_HOST_CALL failed\n", __func__,
+ cmd);
+ } else {
+ ERROR("%s(%u) Got RMI_EXIT_%s. Expected RMI_EXIT_%s.\n",
+ __func__, cmd, rmi_exit[realm_exit_reason],
+ rmi_exit[test_exit_reason]);
+ }
+ } else {
+ ERROR("%s(%u) Unknown or unsupported RmiRecExitReason: 0x%lx\n",
+ __func__, cmd, realm_exit_reason);
+ }
+ return false;
+}
+
+test_result_t host_cmp_result(void)
+{
+ if (host_rmi_get_cmp_result()) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ERROR("RMI registers comparison failed\n");
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Returns the host core position for the specified REC.
+ * The host MPIDR is saved on every REC entry.
+ */
+static unsigned int host_realm_find_core_pos_by_rec(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ if (rec_num < MAX_REC_COUNT && realm_ptr->run[rec_num] != 0U) {
+ return platform_get_core_pos(realm_ptr->host_mpidr[rec_num]);
+ }
+ return (unsigned int)-1;
+}
+
+/*
+ * Send an SGI to the core running the specified REC.
+ * This API can be used to forcefully exit the Realm.
+ */
+void host_rec_send_sgi(struct realm *realm_ptr,
+ unsigned int sgi,
+ unsigned int rec_num)
+{
+ unsigned int core_pos = host_realm_find_core_pos_by_rec(realm_ptr, rec_num);
+ if (core_pos < PLATFORM_CORE_COUNT) {
+ tftf_send_sgi(sgi, core_pos);
+ }
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
new file mode 100644
index 000000000..313009c29
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_realm_rmi.c
@@ -0,0 +1,1258 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include <debug.h>
+#include <heap/page_alloc.h>
+#include <test_helpers.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+#include <plat/common/platform.h>
+#include <realm_def.h>
+#include <tftf_lib.h>
+
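+/*
+ * Helper macros for host_rmi_handler(): SET_ARG(n) seeds SMC argument
+ * register Xn with a random value and records it; the cases fall through so
+ * that every argument register from X<in_reg> up to X7 (i.e. those unused by
+ * the call) is seeded. CHECK_RET(n) flags Xn as corrupted if its value is not
+ * preserved across the SMC, as required by SMCCC v1.2.
+ */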
+#define SET_ARG(_n) { \
+ case _n: \
+ regs[_n] = rand64(); \
+ CONC(args->arg, _n) = regs[_n]; \
+ __attribute__((fallthrough)); \
+}
+
+#define CHECK_RET(_n) { \
+ if (CONC(ret_val.ret, _n) != regs[_n]) { \
+ cmp_flag |= (1U << _n); \
+ } \
+}
+
+static bool rmi_cmp_result;
+static unsigned short vmid;
+
+static smc_ret_values host_rmi_handler(smc_args *args, unsigned int in_reg)
+{
+ u_register_t regs[8];
+ smc_ret_values ret_val;
+ unsigned int cmp_flag = 0U;
+
+ assert(args != NULL);
+ assert((in_reg >= 1U) && (in_reg <= 7U));
+
+ /* Function identifier */
+ regs[0] = (u_register_t)args->fid;
+
+ /* X4 and X5 can be passed as parameters */
+ regs[4] = args->arg4;
+ regs[5] = args->arg5;
+
+ /* SMC calls arguments in X1-X7 */
+ switch (in_reg) {
+ SET_ARG(1);
+ SET_ARG(2);
+ SET_ARG(3);
+ SET_ARG(4);
+ SET_ARG(5);
+ SET_ARG(6);
+ default:
+ regs[7] = rand();
+ args->arg7 = regs[7];
+ }
+
+ ret_val = tftf_smc(args);
+
+ /*
+ * According to SMCCC v1.2 X4-X7 registers' values
+ * must be preserved unless they contain a result,
+ * as specified in the function definition.
+ */
+ if (regs[0] != RMI_RTT_READ_ENTRY) {
+ CHECK_RET(4);
+ }
+
+ CHECK_RET(5);
+ CHECK_RET(6);
+ CHECK_RET(7);
+
+ if (cmp_flag != 0U) {
+ rmi_cmp_result = false;
+
+ ERROR("RMI SMC 0x%lx corrupted registers: %s %s %s %s\n",
+ regs[0],
+ (((cmp_flag & (1U << 4)) != 0U) ? "X4" : ""),
+ (((cmp_flag & (1U << 5)) != 0U) ? "X5" : ""),
+ (((cmp_flag & (1U << 6)) != 0U) ? "X6" : ""),
+ (((cmp_flag & (1U << 7)) != 0U) ? "X7" : ""));
+ }
+
+ return ret_val;
+}
+
+void host_rmi_init_cmp_result(void)
+{
+ rmi_cmp_result = true;
+}
+
+bool host_rmi_get_cmp_result(void)
+{
+ return rmi_cmp_result;
+}
+
+u_register_t host_rmi_psci_complete(u_register_t calling_rec, u_register_t target_rec,
+ unsigned long status)
+{
+ return (host_rmi_handler(&(smc_args) {RMI_PSCI_COMPLETE, calling_rec,
+ target_rec, status}, 4U)).ret0;
+}
+
+u_register_t host_rmi_data_create(bool unknown,
+ u_register_t rd,
+ u_register_t data,
+ u_register_t map_addr,
+ u_register_t src)
+{
+ if (unknown) {
+ return host_rmi_handler(&(smc_args) {RMI_DATA_CREATE_UNKNOWN,
+ rd, data, map_addr}, 4U).ret0;
+ } else {
+ return host_rmi_handler(&(smc_args) {RMI_DATA_CREATE,
+ /* X5 = flags */
+ rd, data, map_addr, src, 0UL}, 6U).ret0;
+ }
+}
+
+static inline u_register_t host_rmi_realm_activate(u_register_t rd)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_ACTIVATE, rd}, 2U).ret0;
+}
+
+u_register_t host_rmi_realm_create(u_register_t rd, u_register_t params_ptr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_CREATE, rd, params_ptr},
+ 3U).ret0;
+}
+
+u_register_t host_rmi_realm_destroy(u_register_t rd)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REALM_DESTROY, rd}, 2U).ret0;
+}
+
+u_register_t host_rmi_data_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t *data,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_DATA_DESTROY, rd, map_addr,
+ (u_register_t)&rets}, 4U);
+
+ *data = rets.ret1;
+ *top = rets.ret2;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rec_create(u_register_t rd,
+ u_register_t rec,
+ u_register_t params_ptr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REC_CREATE,
+ rd, rec, params_ptr}, 4U).ret0;
+}
+
+static inline u_register_t host_rmi_rec_destroy(u_register_t rec)
+{
+ return host_rmi_handler(&(smc_args) {RMI_REC_DESTROY, rec}, 2U).ret0;
+}
+
+static inline u_register_t host_rmi_rtt_create(u_register_t rd,
+ u_register_t rtt,
+ u_register_t map_addr,
+ u_register_t level)
+{
+ return host_rmi_handler(&(smc_args) {RMI_RTT_CREATE,
+ rd, rtt, map_addr, level}, 5U).ret0;
+}
+
+u_register_t host_rmi_rtt_destroy(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *rtt,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_DESTROY,
+ rd, map_addr, level, (u_register_t)&rets}, 5U);
+ *rtt = rets.ret1;
+ *top = rets.ret2;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_features(u_register_t index, u_register_t *features)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_FEATURES, index}, 2U);
+ *features = rets.ret1;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_init_ripas(u_register_t rd,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top)
+
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_INIT_RIPAS,
+ rd, start, end}, 4U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rtt_fold(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *pa)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_FOLD,
+ rd, map_addr, level, (u_register_t)&rets}, 5U);
+ *pa = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rec_aux_count(u_register_t rd,
+ u_register_t *aux_count)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_REC_AUX_COUNT, rd}, 2U);
+ *aux_count = rets.ret1;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_set_ripas(u_register_t rd,
+ u_register_t rec,
+ u_register_t start,
+ u_register_t end,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_SET_RIPAS,
+ rd, rec, start, end}, 5U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
+static inline u_register_t host_rmi_rtt_mapunprotected(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t ns_pa)
+{
+ return host_rmi_handler(&(smc_args) {RMI_RTT_MAP_UNPROTECTED,
+ rd, map_addr, level, ns_pa}, 5U).ret0;
+}
+
+u_register_t host_rmi_rtt_readentry(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ struct rtt_entry *rtt)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_READ_ENTRY,
+ rd, map_addr, level}, 4U);
+ rtt->walk_level = rets.ret1;
+ rtt->state = rets.ret2;
+ rtt->out_addr = rets.ret3;
+ rtt->ripas = rets.ret4;
+ return rets.ret0;
+}
+
+u_register_t host_rmi_rtt_unmap_unprotected(u_register_t rd,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t *top)
+{
+ smc_ret_values rets;
+
+ rets = host_rmi_handler(&(smc_args) {RMI_RTT_UNMAP_UNPROTECTED,
+ rd, map_addr, level}, 4U);
+ *top = rets.ret1;
+ return rets.ret0;
+}
+
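+/*
+ * An IPA is in the unprotected (NS) half of the Realm IPA space when its most
+ * significant bit, bit (S2SZ - 1), is set.
+ */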
+static inline bool ipa_is_ns(u_register_t addr, u_register_t rmm_feat_reg0)
+{
+ return (addr >> (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, rmm_feat_reg0) - 1UL) == 1UL);
+}
+
+static inline u_register_t host_realm_rtt_create(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t phys)
+{
+ addr = ALIGN_DOWN(addr, RTT_MAP_SIZE(level - 1U));
+ return host_rmi_rtt_create(realm->rd, phys, addr, level);
+}
+
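+/*
+ * Create the missing RTTs for 'map_addr' from 'level' + 1 down to
+ * 'max_level': at each level, allocate a granule from the page pool, delegate
+ * it and add it to the Realm's RTT.
+ */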
+u_register_t host_rmi_create_rtt_levels(struct realm *realm,
+ u_register_t map_addr,
+ u_register_t level,
+ u_register_t max_level)
+{
+ u_register_t rtt, ret;
+
+ while (level++ < max_level) {
+ rtt = (u_register_t)page_alloc(PAGE_SIZE);
+ if (rtt == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rtt\n");
+ return REALM_ERROR;
+ } else {
+ ret = host_rmi_granule_delegate(rtt);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", rtt, ret);
+ return REALM_ERROR;
+ }
+ }
+ ret = host_realm_rtt_create(realm, map_addr, level, rtt);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_realm_rtt_create", rtt, ret);
+ host_rmi_granule_undelegate(rtt);
+ page_free(rtt);
+ return REALM_ERROR;
+ }
+ }
+
+ return REALM_SUCCESS;
+}
+
+static u_register_t host_realm_fold_rtt(u_register_t rd, u_register_t addr,
+ u_register_t level)
+{
+ struct rtt_entry rtt;
+ u_register_t pa, ret;
+
+ ret = host_rmi_rtt_readentry(rd, addr, level, &rtt);
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, level=0x%lx addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_readentry", level, addr, ret);
+ return REALM_ERROR;
+ }
+
+ if (rtt.state != RMI_TABLE) {
+ ERROR("%s() failed, rtt.state=%lu\n", "rmi_rtt_readentry",
+ rtt.state);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_fold(rd, addr, level + 1U, &pa);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_fold", addr, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(rtt.out_addr);
+
+ return REALM_SUCCESS;
+}
+
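+/*
+ * Delegate 'map_size' bytes starting at 'target_pa' and map them as
+ * protected Realm data at the same IPA, one granule at a time; the 'unknown'
+ * flag is forwarded to host_rmi_data_create(). Block-sized mappings
+ * (RTT_L2_BLOCK_SIZE) are folded after creation. On failure, all granules
+ * processed so far are destroyed and undelegated.
+ */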
+u_register_t host_realm_delegate_map_protected_data(bool unknown,
+ struct realm *realm,
+ u_register_t target_pa,
+ u_register_t map_size,
+ u_register_t src_pa)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_level, level;
+ u_register_t ret = 0UL;
+ u_register_t size = 0UL;
+ u_register_t phys = target_pa;
+ u_register_t map_addr = target_pa;
+
+ if (!IS_ALIGNED(map_addr, map_size)) {
+ return REALM_ERROR;
+ }
+
+ switch (map_size) {
+ case PAGE_SIZE:
+ map_level = 3UL;
+ break;
+ case RTT_L2_BLOCK_SIZE:
+ map_level = 2UL;
+ break;
+ default:
+ ERROR("Unknown map_size=0x%lx\n", map_size);
+ return REALM_ERROR;
+ }
+
+ for (size = 0UL; size < map_size; size += PAGE_SIZE) {
+ ret = host_rmi_granule_delegate(phys);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, PA=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", phys, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_data_create(unknown, rd, phys, map_addr, src_pa);
+
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ /* Create missing RTTs and retry */
+ level = RMI_RETURN_INDEX(ret);
+ ret = host_rmi_create_rtt_levels(realm, map_addr, level,
+ map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels",
+ ret, __LINE__);
+ goto err;
+ }
+
+ ret = host_rmi_data_create(unknown, rd, phys, map_addr,
+ src_pa);
+ }
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_rmi_data_create", ret);
+ goto err;
+ }
+
+ phys += PAGE_SIZE;
+ src_pa += PAGE_SIZE;
+ map_addr += PAGE_SIZE;
+ }
+
+ if (map_size == RTT_L2_BLOCK_SIZE) {
+ ret = host_realm_fold_rtt(rd, target_pa, map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_realm_fold_rtt", ret);
+ goto err;
+ }
+ }
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", __func__, ret);
+ goto err;
+ }
+
+ return REALM_SUCCESS;
+
+err:
+ while (size >= PAGE_SIZE) {
+ u_register_t data, top;
+
+ ret = host_rmi_data_destroy(rd, map_addr, &data, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_data_destroy", map_addr, ret);
+ }
+
+ ret = host_rmi_granule_undelegate(phys);
+ if (ret != RMI_SUCCESS) {
+ /* The page cannot be returned to the NS world, so it is lost. */
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_rmi_granule_undelegate", ret);
+ }
+ phys -= PAGE_SIZE;
+ size -= PAGE_SIZE;
+ map_addr -= PAGE_SIZE;
+ }
+
+ return REALM_ERROR;
+}
+
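+/*
+ * Map 'map_size' bytes of NS memory at 'ns_pa' into the Realm's unprotected
+ * IPA space (the NS bit of the IPA is set), creating any missing RTT levels.
+ */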
+u_register_t host_realm_map_unprotected(struct realm *realm,
+ u_register_t ns_pa,
+ u_register_t map_size)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_level, level;
+ u_register_t ret = 0UL;
+ u_register_t phys = ns_pa;
+ u_register_t map_addr = ns_pa |
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1UL));
+
+ if (!IS_ALIGNED(map_addr, map_size)) {
+ return REALM_ERROR;
+ }
+
+ switch (map_size) {
+ case PAGE_SIZE:
+ map_level = 3UL;
+ break;
+ case RTT_L2_BLOCK_SIZE:
+ map_level = 2UL;
+ break;
+ default:
+ ERROR("Unknown map_size=0x%lx\n", map_size);
+ return REALM_ERROR;
+ }
+ u_register_t desc = phys | S2TTE_ATTR_FWB_WB_RW;
+
+ ret = host_rmi_rtt_mapunprotected(rd, map_addr, map_level, desc);
+
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ /* Create missing RTTs and retry */
+ level = RMI_RETURN_INDEX(ret);
+ ret = host_rmi_create_rtt_levels(realm, map_addr, level,
+ map_level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels", ret, __LINE__);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_mapunprotected(rd, map_addr, map_level,
+ desc);
+ }
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_rtt_mapunprotected",
+ ret);
+ return REALM_ERROR;
+ }
+
+ return REALM_SUCCESS;
+}
+
+static u_register_t host_realm_rtt_destroy(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t *rtt,
+ u_register_t *top)
+{
+ addr = ALIGN_DOWN(addr, RTT_MAP_SIZE(level - 1U));
+ return host_rmi_rtt_destroy(realm->rd, addr, level, rtt, top);
+}
+
+static u_register_t host_realm_destroy_free_rtt(struct realm *realm,
+ u_register_t addr,
+ u_register_t level,
+ u_register_t rtt_granule)
+{
+ u_register_t rtt, top, ret;
+
+ ret = host_realm_rtt_destroy(realm, addr, level, &rtt, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n",
+ "host_realm_rtt_destroy", ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(rtt_granule);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", rtt_granule, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(rtt_granule);
+ return REALM_SUCCESS;
+}
+
+static u_register_t host_realm_destroy_undelegate_range(struct realm *realm,
+ u_register_t ipa,
+ u_register_t addr,
+ u_register_t size)
+{
+ u_register_t rd = realm->rd;
+ u_register_t ret;
+ u_register_t data, top;
+
+ while (size >= PAGE_SIZE) {
+ ret = host_rmi_data_destroy(rd, ipa, &data, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_data_destroy", ipa, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", ipa, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(addr);
+
+ addr += PAGE_SIZE;
+ ipa += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ return REALM_SUCCESS;
+}
+
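+/*
+ * Walk the RTT entries covering [start, end) at 'level': unmap unprotected
+ * entries, destroy and undelegate protected data, and recurse into
+ * lower-level tables before destroying and freeing them.
+ */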
+static u_register_t host_realm_tear_down_rtt_range(struct realm *realm,
+ u_register_t level,
+ u_register_t start,
+ u_register_t end)
+{
+ u_register_t rd = realm->rd;
+ u_register_t map_size = RTT_MAP_SIZE(level);
+ u_register_t map_addr, next_addr, rtt_out_addr, end_addr, top;
+ struct rtt_entry rtt;
+ u_register_t ret;
+
+ for (map_addr = start; map_addr < end; map_addr = next_addr) {
+ next_addr = ALIGN(map_addr + 1U, map_size);
+ end_addr = MIN(next_addr, end);
+
+ ret = host_rmi_rtt_readentry(rd, ALIGN_DOWN(map_addr, map_size),
+ level, &rtt);
+ if (ret != RMI_SUCCESS) {
+ continue;
+ }
+
+ rtt_out_addr = rtt.out_addr;
+
+ switch (rtt.state) {
+ case RMI_ASSIGNED:
+ if (ipa_is_ns(map_addr, realm->rmm_feat_reg0)) {
+
+ ret = host_rmi_rtt_unmap_unprotected(
+ rd,
+ map_addr,
+ level,
+ &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_rmi_rtt_unmap_unprotected",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ } else {
+ ret = host_realm_destroy_undelegate_range(
+ realm,
+ map_addr,
+ rtt_out_addr,
+ map_size);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, addr=0x%lx ret=0x%lx\n",
+ "host_realm_destroy_undelegate_range",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ }
+ break;
+ case RMI_UNASSIGNED:
+ break;
+ case RMI_TABLE:
+ ret = host_realm_tear_down_rtt_range(realm, level + 1U,
+ map_addr,
+ end_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, map_addr=0x%lx ret=0x%lx\n",
+ "host_realm_tear_down_rtt_range",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_realm_destroy_free_rtt(realm, map_addr,
+ level + 1U,
+ rtt_out_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, map_addr=0x%lx ret=0x%lx\n",
+ "host_realm_destroy_free_rtt",
+ map_addr, ret);
+ return REALM_ERROR;
+ }
+ break;
+ default:
+ return REALM_ERROR;
+ }
+ }
+
+ return REALM_SUCCESS;
+}
+
+u_register_t host_rmi_granule_delegate(u_register_t addr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_GRANULE_DELEGATE, addr}, 2U).ret0;
+}
+
+u_register_t host_rmi_granule_undelegate(u_register_t addr)
+{
+ return host_rmi_handler(&(smc_args) {RMI_GRANULE_UNDELEGATE, addr}, 2U).ret0;
+}
+
+u_register_t host_rmi_version(u_register_t requested_ver)
+{
+ smc_ret_values ret;
+
+ ret = host_rmi_handler(&(smc_args) {RMI_VERSION, requested_ver}, 2U);
+ if (ret.ret0 == (u_register_t)SMC_UNKNOWN) {
+ return SMC_UNKNOWN;
+ }
+ /* Return lower version. */
+ return ret.ret1;
+}
+
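+/*
+ * Allocate and delegate the RD and the starting-level RTT, fill in the Realm
+ * parameters and create the Realm via RMI_REALM_CREATE. On any failure the
+ * granules allocated so far are undelegated and freed again.
+ */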
+u_register_t host_realm_create(struct realm *realm)
+{
+ struct rmi_realm_params *params;
+ u_register_t ret;
+
+ realm->par_size = REALM_MAX_LOAD_IMG_SIZE;
+
+ realm->state = REALM_STATE_NULL;
+ /*
+ * Allocate memory for PAR - Realm image. Granule delegation
+ * of PAR will be performed during rtt creation.
+ */
+ realm->par_base = (u_register_t)page_alloc(realm->par_size);
+ if (realm->par_base == HEAP_NULL_PTR) {
+ ERROR("page_alloc failed, base=0x%lx, size=0x%lx\n",
+ realm->par_base, realm->par_size);
+ return REALM_ERROR;
+ }
+
+ /* Allocate and delegate RD */
+ realm->rd = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rd == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rd\n");
+ goto err_free_par;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rd, ret);
+ goto err_free_rd;
+ }
+ }
+
+ /* Allocate and delegate RTT */
+ realm->rtt_addr = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rtt_addr == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for rtt_addr\n");
+ goto err_undelegate_rd;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rtt_addr, ret);
+ goto err_free_rtt;
+ }
+ }
+
+ /* Allocate memory for params */
+ params = (struct rmi_realm_params *)page_alloc(PAGE_SIZE);
+ if (params == NULL) {
+ ERROR("Failed to allocate memory for params\n");
+ goto err_undelegate_rtt;
+ }
+
+ /* Populate params */
+ params->s2sz = EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0);
+ params->num_bps = realm->num_bps;
+ params->num_wps = realm->num_wps;
+
+ /* SVE enable and vector length */
+ if ((realm->rmm_feat_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN) != 0UL) {
+ params->flags = RMI_REALM_FLAGS_SVE;
+ params->sve_vl = realm->sve_vl;
+ } else {
+ params->flags = 0UL;
+ params->sve_vl = 0U;
+ }
+
+ /* PMU enable and number of event counters */
+ if ((realm->rmm_feat_reg0 & RMI_FEATURE_REGISTER_0_PMU_EN) != 0UL) {
+ params->flags |= RMI_REALM_FLAGS_PMU;
+ params->pmu_num_ctrs = realm->pmu_num_ctrs;
+ } else {
+ params->pmu_num_ctrs = 0U;
+ }
+
+ params->hash_algo = RMI_HASH_SHA_256;
+ params->vmid = vmid++;
+ params->rtt_base = realm->rtt_addr;
+ params->rtt_level_start = 0L;
+ params->rtt_num_start = 1U;
+
+ /* Create Realm */
+ ret = host_rmi_realm_create(realm->rd, (u_register_t)params);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_realm_create", realm->rd, ret);
+ goto err_free_params;
+ }
+
+ realm->vmid = params->vmid;
+ ret = host_rmi_rec_aux_count(realm->rd, &realm->num_aux);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_rec_aux_count", realm->rd, ret);
+ host_rmi_realm_destroy(realm->rd);
+ goto err_free_params;
+ }
+
+ realm->state = REALM_STATE_NEW;
+
+ /* Free params */
+ page_free((u_register_t)params);
+ return REALM_SUCCESS;
+
+err_free_params:
+ page_free((u_register_t)params);
+
+err_undelegate_rtt:
+ ret = host_rmi_granule_undelegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rtt_addr, ret);
+ }
+
+err_free_rtt:
+ page_free(realm->rtt_addr);
+
+err_undelegate_rd:
+ ret = host_rmi_granule_undelegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rd, ret);
+ }
+err_free_rd:
+ page_free(realm->rd);
+
+err_free_par:
+ page_free(realm->par_base);
+
+ return REALM_ERROR;
+}
+
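+/*
+ * Map the Realm payload image located at 'realm_payload_adr' into the
+ * Realm's PAR, one page at a time.
+ */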
+u_register_t host_realm_map_payload_image(struct realm *realm,
+ u_register_t realm_payload_adr)
+{
+ u_register_t src_pa = realm_payload_adr;
+ u_register_t i = 0UL;
+ u_register_t ret;
+
+ /* MAP image regions */
+ while (i < (realm->par_size / PAGE_SIZE)) {
+ ret = host_realm_delegate_map_protected_data(false, realm,
+ realm->par_base + i * PAGE_SIZE,
+ PAGE_SIZE,
+ src_pa + i * PAGE_SIZE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, par_base=0x%lx ret=0x%lx\n",
+ "host_realm_delegate_map_protected_data",
+ realm->par_base, ret);
+ return REALM_ERROR;
+ }
+ i++;
+ }
+
+ return REALM_SUCCESS;
+}
+
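+/*
+ * Initialise the RIPAS of the IPA range [start, end), creating deeper RTT
+ * levels on demand whenever the RMM reports a missing table.
+ */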
+u_register_t host_realm_init_ipa_state(struct realm *realm, u_register_t level,
+ u_register_t start, uint64_t end)
+{
+ u_register_t rd = realm->rd, ret;
+ u_register_t top;
+
+ do {
+ if (level > RTT_MAX_LEVEL) {
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_rtt_init_ripas(rd, start, end, &top);
+ if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+ int cur_level = RMI_RETURN_INDEX(ret);
+
+ if (cur_level < level) {
+ ret = host_rmi_create_rtt_levels(realm,
+ start,
+ cur_level,
+ level);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_create_rtt_levels",
+ ret, __LINE__);
+ return REALM_ERROR;
+ }
+
+ /* Retry with the RTT levels in place */
+ continue;
+ }
+ }
+
+ /* Retry on the next level */
+ level++;
+
+ } while (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT);
+
+ return ret == RMI_SUCCESS ? RMI_SUCCESS : REALM_ERROR;
+}
+
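+/*
+ * Map the NS shared memory buffer into the Realm's unprotected IPA space
+ * and record its IPA and size in the Realm object.
+ */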
+u_register_t host_realm_map_ns_shared(struct realm *realm,
+ u_register_t ns_shared_mem_adr,
+ u_register_t ns_shared_mem_size)
+{
+ u_register_t i = 0UL;
+ u_register_t ret;
+
+ realm->ipa_ns_buffer = ns_shared_mem_adr |
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1));
+ realm->ns_buffer_size = ns_shared_mem_size;
+ /* MAP SHARED_NS region */
+ while (i < ns_shared_mem_size / PAGE_SIZE) {
+ ret = host_realm_map_unprotected(realm, ns_shared_mem_adr +
+ (i * PAGE_SIZE), PAGE_SIZE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, par_base=0x%lx ret=0x%lx\n",
+ "host_realm_map_unprotected",
+ (ns_shared_mem_adr + i * PAGE_SIZE), ret);
+ return REALM_ERROR;
+ }
+ i++;
+ }
+ return REALM_SUCCESS;
+}
+
+/* Free AUX pages for rec0 to rec_num */
+static void host_realm_free_rec_aux(u_register_t
+ (*aux_pages)[REC_PARAMS_AUX_GRANULES],
+ unsigned int num_aux, unsigned int rec_num)
+{
+ u_register_t ret;
+
+ assert(rec_num < MAX_REC_COUNT);
+ assert(num_aux <= REC_PARAMS_AUX_GRANULES);
+ for (unsigned int i = 0U; i <= rec_num; i++) {
+ for (unsigned int j = 0U; j < num_aux &&
+ aux_pages[i][j] != 0U; j++) {
+ ret = host_rmi_granule_undelegate(aux_pages[i][j]);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, index=%u,%u ret=0x%lx\n",
+ "host_rmi_granule_undelegate", i, j, ret);
+ }
+ page_free(aux_pages[i][j]);
+ }
+ }
+}
+
+static u_register_t host_realm_alloc_rec_aux(struct realm *realm,
+ struct rmi_rec_params *params, u_register_t rec_num)
+{
+ u_register_t ret;
+ unsigned int j;
+
+ assert(rec_num < MAX_REC_COUNT);
+ for (j = 0U; j < realm->num_aux; j++) {
+ params->aux[j] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (params->aux[j] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for aux rec\n");
+ return RMI_ERROR_REALM;
+ }
+ ret = host_rmi_granule_delegate(params->aux[j]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, index=%u ret=0x%lx\n",
+ "host_rmi_granule_delegate", j, ret);
+ /*
+ * Free the current page; pages delegated earlier are freed
+ * in host_realm_free_rec_aux().
+ */
+ page_free(params->aux[j]);
+ params->aux[j] = 0UL;
+ return RMI_ERROR_REALM;
+ }
+
+ /* Keep a copy in the Realm object for the final destruction. */
+ realm->aux_pages_all_rec[rec_num][j] = params->aux[j];
+ }
+ return RMI_SUCCESS;
+}
+
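+/*
+ * For each REC: allocate and clear the run object, allocate and delegate the
+ * REC granule and its auxiliary granules, then create the REC via
+ * RMI_REC_CREATE. On failure, everything allocated so far is undelegated
+ * and freed.
+ */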
+u_register_t host_realm_rec_create(struct realm *realm)
+{
+ struct rmi_rec_params *rec_params;
+ u_register_t ret;
+ unsigned int i;
+
+ for (i = 0U; i < realm->rec_count; i++) {
+ realm->run[i] = 0U;
+ realm->rec[i] = 0U;
+ realm->mpidr[i] = 0U;
+ }
+ (void)memset(realm->aux_pages_all_rec, 0x0, sizeof(u_register_t) *
+ realm->num_aux*realm->rec_count);
+
+ /* Allocate memory for rec_params */
+ rec_params = (struct rmi_rec_params *)page_alloc(PAGE_SIZE);
+ if (rec_params == NULL) {
+ ERROR("Failed to allocate memory for rec_params\n");
+ return REALM_ERROR;
+ }
+
+ for (i = 0U; i < realm->rec_count; i++) {
+ (void)memset(rec_params, 0x0, PAGE_SIZE);
+
+ /* Allocate memory for run object */
+ realm->run[i] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->run[i] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for run\n");
+ goto err_free_mem;
+ }
+ (void)memset((void *)realm->run[i], 0x0, PAGE_SIZE);
+
+ /* Allocate and delegate REC */
+ realm->rec[i] = (u_register_t)page_alloc(PAGE_SIZE);
+ if (realm->rec[i] == HEAP_NULL_PTR) {
+ ERROR("Failed to allocate memory for REC\n");
+ goto err_free_mem;
+ } else {
+ ret = host_rmi_granule_delegate(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_delegate", realm->rd, ret);
+ goto err_free_mem;
+ }
+ }
+
+ /* Delegate the required number of auxiliary Granules */
+ ret = host_realm_alloc_rec_aux(realm, rec_params, i);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_realm_alloc_rec_aux",
+ ret);
+ goto err_free_aux;
+ }
+
+ /* Populate rec_params */
+ rec_params->pc = realm->par_base;
+ rec_params->flags = realm->rec_flag[i];
+
+ rec_params->mpidr = (u_register_t)i;
+ rec_params->num_aux = realm->num_aux;
+ realm->mpidr[i] = (u_register_t)i;
+
+ /* Create REC */
+ ret = host_rmi_rec_create(realm->rd, realm->rec[i],
+ (u_register_t)rec_params);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed,index=%u, ret=0x%lx\n",
+ "host_rmi_rec_create", i, ret);
+ goto err_free_aux;
+ }
+ }
+ /* Free rec_params */
+ page_free((u_register_t)rec_params);
+ return REALM_SUCCESS;
+
+err_free_aux:
+ host_realm_free_rec_aux(realm->aux_pages_all_rec, realm->num_aux, i);
+
+err_free_mem:
+ for (unsigned int j = 0U; j <= i; j++) {
+ ret = host_rmi_granule_undelegate(realm->rec[j]);
+ if (ret != RMI_SUCCESS) {
+ WARN("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rec[j], ret);
+ }
+ page_free(realm->run[j]);
+ page_free(realm->rec[j]);
+ }
+ page_free((u_register_t)rec_params);
+ return REALM_ERROR;
+}
+
+u_register_t host_realm_activate(struct realm *realm)
+{
+ u_register_t ret;
+
+ /* Activate Realm */
+ ret = host_rmi_realm_activate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx\n", "host_rmi_realm_activate",
+ ret);
+ return REALM_ERROR;
+ }
+
+ realm->state = REALM_STATE_ACTIVE;
+
+ return REALM_SUCCESS;
+}
+
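+/*
+ * Tear down the Realm: destroy, undelegate and free every REC and its
+ * auxiliary granules, tear down the RTT ranges, destroy the RD and release
+ * the remaining granules.
+ */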
+u_register_t host_realm_destroy(struct realm *realm)
+{
+ u_register_t ret;
+
+ if (realm->state == REALM_STATE_NULL) {
+ return REALM_SUCCESS;
+ }
+
+ /* For each REC - Destroy, undelegate and free */
+ for (unsigned int i = 0U; i < realm->rec_count; i++) {
+ if (realm->rec[i] == 0U) {
+ break;
+ }
+
+ ret = host_rmi_rec_destroy(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_rec_destroy", realm->rec[i], ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rec[i]);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rec=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rec[i], ret);
+ return REALM_ERROR;
+ }
+
+ page_free(realm->rec[i]);
+
+ /* Free run object */
+ page_free(realm->run[i]);
+ }
+
+ host_realm_free_rec_aux(realm->aux_pages_all_rec,
+ realm->num_aux, realm->rec_count - 1U);
+
+ /*
+ * For each data granule: destroy, undelegate and free it.
+ * RTTs (level 1 and below) must be destroyed leaf-upwards, using the
+ * RMI_DATA_DESTROY, RMI_RTT_DESTROY and RMI_GRANULE_UNDELEGATE
+ * commands.
+ */
+ if (host_realm_tear_down_rtt_range(realm, 0UL, 0UL,
+ (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm->rmm_feat_reg0) - 1))) != RMI_SUCCESS) {
+ ERROR("host_realm_tear_down_rtt_range() line=%u\n", __LINE__);
+ return REALM_ERROR;
+ }
+ if (realm->shared_mem_created == true) {
+ if (host_realm_tear_down_rtt_range(realm, 0UL, realm->ipa_ns_buffer,
+ (realm->ipa_ns_buffer + realm->ns_buffer_size)) !=
+ RMI_SUCCESS) {
+ ERROR("host_realm_tear_down_rtt_range() line=%u\n", __LINE__);
+ return REALM_ERROR;
+ }
+ }
+
+ /*
+ * RD Destroy, undelegate and free
+ * RTT(L0) undelegate and free
+ * PAR free
+ */
+ ret = host_rmi_realm_destroy(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_realm_destroy", realm->rd, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rd);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rd=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rd, ret);
+ return REALM_ERROR;
+ }
+
+ ret = host_rmi_granule_undelegate(realm->rtt_addr);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, rtt_addr=0x%lx ret=0x%lx\n",
+ "host_rmi_granule_undelegate", realm->rtt_addr, ret);
+ return REALM_ERROR;
+ }
+
+ page_free(realm->rd);
+ page_free(realm->rtt_addr);
+ page_free(realm->par_base);
+
+ return REALM_SUCCESS;
+}
+
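+/*
+ * Return the index of the REC whose MPIDR matches 'mpidr', or MAX_REC_COUNT
+ * if no such REC is found.
+ */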
+unsigned int host_realm_find_rec_by_mpidr(unsigned int mpidr, struct realm *realm)
+{
+ for (unsigned int i = 0U; i < MAX_REC_COUNT; i++) {
+ if (realm->run[i] != 0U && realm->mpidr[i] == mpidr) {
+ return i;
+ }
+ }
+ return MAX_REC_COUNT;
+}
+
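+/*
+ * Enter the given REC and service host calls (shared buffer address, print,
+ * test result) until the Realm exits for another reason. The final exit
+ * reason is returned in *exit_reason.
+ */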
+u_register_t host_realm_rec_enter(struct realm *realm,
+ u_register_t *exit_reason,
+ unsigned int *host_call_result,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run;
+ u_register_t ret;
+ bool re_enter_rec;
+
+ if (rec_num >= realm->rec_count) {
+ return RMI_ERROR_INPUT;
+ }
+
+ run = (struct rmi_rec_run *)realm->run[rec_num];
+ realm->host_mpidr[rec_num] = read_mpidr_el1();
+ do {
+ re_enter_rec = false;
+ ret = host_rmi_handler(&(smc_args) {RMI_REC_ENTER,
+ realm->rec[rec_num], realm->run[rec_num]}, 3U).ret0;
+ VERBOSE("%s() ret=%lu run->exit.exit_reason=%lu "
+ "run->exit.esr=0x%lx EC_BITS=%u ISS_DFSC_MASK=0x%lx\n",
+ __func__, ret, run->exit.exit_reason, run->exit.esr,
+ ((EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL)),
+ (ISS_BITS(run->exit.esr) & ISS_DFSC_MASK));
+
+ /* Check for a data abort caused by a Granule Protection Fault (GPF) */
+ if (EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL) {
+ ERROR("EC_BITS(run->exit.esr) == EC_DABORT_CUR_EL\n");
+ if ((ISS_BITS(run->exit.esr) & ISS_DFSC_MASK) ==
+ DFSC_GPF_DABORT) {
+ ERROR("DFSC_GPF_DABORT\n");
+ }
+ }
+
+ if (ret != RMI_SUCCESS) {
+ return ret;
+ }
+
+ if (run->exit.exit_reason == RMI_EXIT_HOST_CALL) {
+ switch (run->exit.imm) {
+ case HOST_CALL_GET_SHARED_BUFF_CMD:
+ run->entry.gprs[0] = realm->ipa_ns_buffer;
+ re_enter_rec = true;
+ break;
+ case HOST_CALL_EXIT_PRINT_CMD:
+ realm_print_handler(realm, run->exit.gprs[0]);
+ re_enter_rec = true;
+ break;
+ case HOST_CALL_EXIT_SUCCESS_CMD:
+ *host_call_result = TEST_RESULT_SUCCESS;
+ break;
+ case HOST_CALL_EXIT_FAILED_CMD:
+ *host_call_result = TEST_RESULT_FAIL;
+ break;
+ default:
+ break;
+ }
+ }
+ } while (re_enter_rec);
+
+ *exit_reason = run->exit.exit_reason;
+ return ret;
+}
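+
+/*
+ * Illustrative call sequence only (argument values are placeholders; the
+ * tests normally drive these steps through the higher-level
+ * host_create_activate_realm_payload() and host_destroy_realm() helpers):
+ *
+ *	host_realm_create(&realm);
+ *	host_realm_map_payload_image(&realm, image_base);
+ *	host_realm_init_ipa_state(&realm, 0UL, 0UL, end_ipa);
+ *	host_realm_map_ns_shared(&realm, ns_buf_base, ns_buf_size);
+ *	host_realm_rec_create(&realm);
+ *	host_realm_activate(&realm);
+ *	host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ *	host_realm_destroy(&realm);
+ */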
diff --git a/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c b/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c
new file mode 100644
index 000000000..b3bfdae10
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/host_shared_data.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <cassert.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_rmi.h>
+#include <host_shared_data.h>
+
+/*
+ * Currently TFTF supports the creation of only a single Realm, so the
+ * shared area can be assumed to be large enough for all the RECs of
+ * that Realm.
+ */
+CASSERT(NS_REALM_SHARED_MEM_SIZE > (MAX_REC_COUNT *
+ sizeof(host_shared_data_t)),
+ too_small_realm_shared_mem_size);
+
+/*
+ * Return a pointer to the given REC's shared buffer, viewed as a
+ * host_shared_data_t structure.
+ */
+host_shared_data_t *host_get_shared_structure(struct realm *realm_ptr, unsigned int rec_num)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ return &host_shared_data[rec_num];
+}
+
+/*
+ * Set a data value to be shared from the Host to the Realm.
+ */
+void host_shared_data_set_host_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index, u_register_t val)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ assert(index < MAX_DATA_SIZE);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ host_shared_data[rec_num].host_param_val[index] = val;
+}
+
+/*
+ * Return the data shared by the Realm through realm_out_val[index].
+ */
+u_register_t host_shared_data_get_realm_val(struct realm *realm_ptr,
+ unsigned int rec_num, uint8_t index)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ assert(index < MAX_DATA_SIZE);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ return host_shared_data[rec_num].realm_out_val[index];
+}
+
+/*
+ * Set the command to be sent from the Host to the Realm.
+ */
+void host_shared_data_set_realm_cmd(struct realm *realm_ptr,
+ uint8_t cmd, unsigned int rec_num)
+{
+ host_shared_data_t *host_shared_data;
+
+ assert(realm_ptr != NULL);
+ assert(rec_num < MAX_REC_COUNT);
+ host_shared_data = (host_shared_data_t *)realm_ptr->host_shared_data;
+ host_shared_data[rec_num].realm_cmd = cmd;
+}
+
diff --git a/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c b/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c
new file mode 100644
index 000000000..ce604c0da
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/rmi_delegate_tests.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <arch_features.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_shared_data.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include "rmi_spm_tests.h"
+#include <test_helpers.h>
+
+static test_result_t host_realm_multi_cpu_payload_test(void);
+static test_result_t host_realm_multi_cpu_payload_del_undel(void);
+
+/* Buffer to delegate and undelegate */
+static char bufferdelegate[NUM_GRANULES * GRANULE_SIZE * PLATFORM_CORE_COUNT]
+ __aligned(GRANULE_SIZE);
+static char bufferstate[NUM_GRANULES * PLATFORM_CORE_COUNT];
+
+/*
+ * Overall test for the realm payload, in five sections:
+ * 1. Single CPU version check: SMC call to realm payload to return
+ *    version information
+ * 2. Multi CPU version check: SMC call to realm payload to return
+ *    version information from all CPUs in the system
+ * 3. Delegate and undelegate a Non-Secure granule via
+ *    SMC call to realm payload
+ * 4. Multi CPU delegation where a random assignment of states
+ *    (realm, non-secure) is given to a set of granules.
+ *    Each CPU is given a number of granules to delegate in
+ *    parallel with the other CPUs
+ * 5. Fail testing of delegation parameters, such as
+ *    attempting to perform a delegation on the same granule
+ *    twice and then testing a misaligned address
+ */
+
+test_result_t host_init_buffer_del(void)
+{
+ u_register_t retrmm;
+
+ host_rmi_init_cmp_result();
+
+ for (uint32_t i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if ((rand() % 2) == 0) {
+ retrmm = host_rmi_granule_delegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ bufferstate[i] = B_DELEGATED;
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ bufferstate[i] = B_UNDELEGATED;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Single CPU version check function
+ */
+test_result_t host_realm_version_single_cpu(void)
+{
+ u_register_t retrmm = 0U;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_version(RMI_ABI_VERSION_VAL);
+
+ tftf_testcase_printf("RMM version is: %lu.%lu (expected: %u.%u)\n",
+ RMI_ABI_VERSION_GET_MAJOR(retrmm),
+ RMI_ABI_VERSION_GET_MINOR(retrmm),
+ RMI_ABI_VERSION_GET_MAJOR(RMI_ABI_VERSION_VAL),
+ RMI_ABI_VERSION_GET_MINOR(RMI_ABI_VERSION_VAL));
+
+ return host_cmp_result();
+}
+
+/*
+ * Multi CPU version check function in parallel.
+ */
+test_result_t host_realm_version_multi_cpu(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)host_realm_multi_cpu_payload_test, 0);
+
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ ret = host_realm_multi_cpu_payload_test();
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Delegate and undelegate a Non-Secure granule.
+ */
+test_result_t host_realm_delegate_undelegate(void)
+{
+ u_register_t retrmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)bufferdelegate);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ retrmm = host_rmi_granule_undelegate((u_register_t)bufferdelegate);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Undelegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ tftf_testcase_printf("Delegate and undelegate of buffer 0x%lx succeeded\n",
+ (uintptr_t)bufferdelegate);
+
+ return host_cmp_result();
+}
+
+static test_result_t host_realm_multi_cpu_payload_test(void)
+{
+ u_register_t retrmm = 0U;
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_version(RMI_ABI_VERSION_VAL);
+
+ tftf_testcase_printf("Multi CPU RMM version on CPU %llx is: %lu.%lu\n",
+ (long long)read_mpidr_el1() & MPID_MASK, RMI_ABI_VERSION_GET_MAJOR(retrmm),
+ RMI_ABI_VERSION_GET_MINOR(retrmm));
+
+ return host_cmp_result();
+}
+
+/*
+ * Select all CPUs to randomly delegate/undelegate
+ * granule pages to stress the delegation mechanism.
+ */
+test_result_t host_realm_delundel_multi_cpu(void)
+{
+ u_register_t lead_mpid, target_mpid;
+ int cpu_node;
+ long long ret;
+ u_register_t retrmm;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+
+ host_rmi_init_cmp_result();
+
+ if (host_init_buffer_del() == TEST_RESULT_FAIL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(target_mpid,
+ (uintptr_t)host_realm_multi_cpu_payload_del_undel, 0);
+
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("CPU ON failed for 0x%llx\n",
+ (unsigned long long)target_mpid);
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ for_each_cpu(cpu_node) {
+ target_mpid = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+
+ if (lead_mpid == target_mpid) {
+ continue;
+ }
+
+ while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0) !=
+ PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+ /*
+ * Cleanup to set all granules back to undelegated
+ */
+ for (uint32_t i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if (bufferstate[i] == B_DELEGATED) {
+ retrmm = host_rmi_granule_undelegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ bufferstate[i] = B_UNDELEGATED;
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns fail, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Multi CPU testing of delegate and undelegate of granules.
+ * The granules are first randomly initialized to either realm or non-secure
+ * state using host_init_buffer_del() and then the function below assigns
+ * NUM_GRANULES to each CPU for delegation or undelegation depending upon
+ * the initial state.
+ */
+static test_result_t host_realm_multi_cpu_payload_del_undel(void)
+{
+ u_register_t retrmm;
+ unsigned int cpu_node;
+
+ cpu_node = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+ host_rmi_init_cmp_result();
+
+ for (uint32_t i = 0; i < NUM_GRANULES; i++) {
+ if (bufferstate[((cpu_node * NUM_GRANULES) + i)] == B_UNDELEGATED) {
+ retrmm = host_rmi_granule_delegate((u_register_t)
+ &bufferdelegate[((cpu_node * NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_DELEGATED;
+ } else {
+ retrmm = host_rmi_granule_undelegate((u_register_t)
+ &bufferdelegate[((cpu_node * NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_UNDELEGATED;
+ }
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Fail testing of the delegation process. First, an error is expected
+ * when delegating the same granule twice; second, when submitting a
+ * misaligned address.
+ */
+test_result_t host_realm_fail_del(void)
+{
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ u_register_t retrmm;
+
+ host_rmi_init_cmp_result();
+
+ retrmm = host_rmi_granule_delegate((u_register_t)&bufferdelegate[0]);
+ if (retrmm != 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not pass as expected for double delegation, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ retrmm = host_rmi_granule_delegate((u_register_t)&bufferdelegate[0]);
+ if (retrmm == 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not fail as expected for double delegation, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ retrmm = host_rmi_granule_undelegate((u_register_t)&bufferdelegate[1]);
+ if (retrmm == 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation does not return fail for misaligned address, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ retrmm = host_rmi_granule_undelegate((u_register_t)&bufferdelegate[0]);
+
+ if (retrmm != 0UL) {
+ tftf_testcase_printf
+ ("Delegate operation returns fail for cleanup, %lx\n", retrmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c b/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c
new file mode 100644
index 000000000..d7a8157b7
--- /dev/null
+++ b/tftf/tests/runtime_services/host_realm_managment/rmi_spm_tests.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include <debug.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <host_realm_helper.h>
+#include <lib/events.h>
+#include <lib/power_management.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include "rmi_spm_tests.h"
+#include <spm_test_helpers.h>
+#include <smccc.h>
+#include <test_helpers.h>
+
+static test_result_t realm_multi_cpu_payload_del_undel(void);
+
+#define ECHO_VAL1 U(0xa0a0a0a0)
+#define ECHO_VAL2 U(0xb0b0b0b0)
+#define ECHO_VAL3 U(0xc0c0c0c0)
+#define MAX_REPEATED_TEST 3
+
+/* Buffer to delegate and undelegate */
+static char bufferdelegate[NUM_GRANULES * GRANULE_SIZE * PLATFORM_CORE_COUNT]
+ __aligned(GRANULE_SIZE);
+static char bufferstate[NUM_GRANULES * PLATFORM_CORE_COUNT];
+static int cpu_test_spm_rmi[PLATFORM_CORE_COUNT];
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
+static unsigned int lead_mpid;
+/*
+ * The following test conducts SPM (direct messaging) tests on a subset of
+ * selected CPUs while simultaneously performing another set of RMI
+ * (delegation) tests on the remaining CPUs, up to the full platform count.
+ * Once that test completes, the same test is run again with a different
+ * assignment of which CPUs do SPM versus RMI.
+ */
+
+/*
+ * Function that randomizes the CPU assignment of tests, SPM or RMI
+ */
+static void rand_cpu_spm_rmi(void)
+{
+ int fentry;
+ int seln = 0;
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ cpu_test_spm_rmi[i] = -1;
+ }
+ for (int i = 0; i < NUM_CPU_DED_SPM; i++) {
+ fentry = 0;
+ while (fentry == 0) {
+#if (PLATFORM_CORE_COUNT > 1)
+ seln = (rand() % (PLATFORM_CORE_COUNT - 1)) + 1;
+#endif
+ if (cpu_test_spm_rmi[seln] == -1) {
+ cpu_test_spm_rmi[seln] = 1;
+ fentry = 1;
+ }
+ }
+ }
+ for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ if (cpu_test_spm_rmi[i] == -1) {
+ cpu_test_spm_rmi[i] = 0;
+ }
+ }
+}
+
+/*
+ * Get function to determine what has been assigned to a given CPU
+ */
+static int spm_rmi_test(unsigned int mpidr)
+{
+ return cpu_test_spm_rmi[platform_get_core_pos(mpidr)];
+}
+
+/*
+ * RMI function to randomize the initial state of the granules allocated for
+ * the test. A random subset is delegated, leaving the rest undelegated.
+ */
+static test_result_t init_buffer_del_spm_rmi(void)
+{
+ u_register_t retrmm;
+
+ for (int i = 0; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if ((rand() % 2) == 0) {
+ retrmm = host_rmi_granule_delegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ bufferstate[i] = B_DELEGATED;
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns 0x%lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ bufferstate[i] = B_UNDELEGATED;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t reset_buffer_del_spm_rmi(void)
+{
+ u_register_t retrmm;
+
+ for (uint32_t i = 0U; i < (NUM_GRANULES * PLATFORM_CORE_COUNT) ; i++) {
+ if (bufferstate[i] == B_DELEGATED) {
+ retrmm = host_rmi_granule_undelegate(
+ (u_register_t)&bufferdelegate[i * GRANULE_SIZE]);
+ if (retrmm != 0UL) {
+ ERROR("Undelegate operation returns fail, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ bufferstate[i] = B_UNDELEGATED;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Each CPU reaching this function sends a ready event to all other CPUs
+ * and waits for the other CPUs before starting to execute its callback in
+ * parallel with them.
+ */
+static test_result_t wait_then_call(test_result_t (*callback)(void))
+{
+ unsigned int mpidr, this_mpidr = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_node, core_pos;
+ unsigned int this_core_pos = platform_get_core_pos(this_mpidr);
+
+ tftf_send_event_to_all(&cpu_booted[this_core_pos]);
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ /* Ignore myself and the lead core */
+ if (mpidr == this_mpidr || mpidr == lead_mpid) {
+ continue;
+ }
+ core_pos = platform_get_core_pos(mpidr);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+ /* All cores reach this call in approximately "same" time */
+ return (*callback)();
+}
+
+/*
+ * Power on the given CPU, provide it with the entry point to run and
+ * return the result.
+ */
+static test_result_t run_on_cpu(unsigned int mpidr, uintptr_t cpu_on_handler)
+{
+ int32_t ret;
+
+ ret = tftf_cpu_on(mpidr, cpu_on_handler, 0U);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("tftf_cpu_on mpidr 0x%x returns %d\n", mpidr, ret);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * SPM functions for direct messaging.
+ */
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+ };
+
+static test_result_t send_cactus_echo_cmd(ffa_id_t sender,
+ ffa_id_t dest,
+ uint64_t value)
+{
+ struct ffa_value ret;
+ ret = cactus_echo_send_cmd(sender, dest, value);
+
+ /*
+ * Return responses may be FFA_MSG_SEND_DIRECT_RESP or FFA_INTERRUPT,
+ * but only expect the former. Expect SMC32 convention from SP.
+ */
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) != CACTUS_SUCCESS ||
+ cactus_echo_get_val(ret) != value) {
+ ERROR("Echo Failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Handler that is passed during tftf_cpu_on to individual CPU cores.
+ * Runs on a specific core and sends a direct message request.
+ * Expects core_pos | SP_ID as a response.
+ */
+static test_result_t run_spm_direct_message(void)
+{
+ unsigned int mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+ test_result_t ret = TEST_RESULT_SUCCESS;
+ struct ffa_value ffa_ret;
+
+ /*
+ * Send a direct message request to SP1 (MP SP) from current physical
+ * CPU. Notice SP1 ECs are already woken as a result of the PSCI_CPU_ON
+ * invocation so they already reached the message loop.
+ * The SPMC uses the MP pinned context corresponding to the physical
+ * CPU emitting the request.
+ */
+ ret = send_cactus_echo_cmd(HYP_ID, SP_ID(1), ECHO_VAL1);
+ if (ret != TEST_RESULT_SUCCESS) {
+ goto out;
+ }
+
+ /*
+ * Secure Partitions beyond the first SP only have their first
+ * EC (or vCPU0) woken up at boot time by the SPMC.
+ * Other ECs need one round of ffa_run to reach the message loop.
+ */
+ ffa_ret = ffa_run(SP_ID(2), core_pos);
+ if (ffa_func_id(ffa_ret) != FFA_MSG_WAIT) {
+ ERROR("Failed to run SP%x on core %u\n", SP_ID(2),
+ core_pos);
+ ret = TEST_RESULT_FAIL;
+ goto out;
+ }
+
+ /*
+ * Send a direct message request to SP2 (MP SP) from current physical
+ * CPU. The SPMC uses the MP pinned context corresponding to the
+ * physical CPU emitting the request.
+ */
+ ret = send_cactus_echo_cmd(HYP_ID, SP_ID(2), ECHO_VAL2);
+ if (ret != TEST_RESULT_SUCCESS) {
+ goto out;
+ }
+
+ /*
+ * Send a direct message request to SP3 (UP SP) from current physical CPU.
+ * The SPMC uses the single vCPU migrated to the new physical core.
+ * The single SP vCPU may receive requests from multiple physical CPUs.
+ * Thus it is possible one message is being processed on one core while
+ * another (or multiple) cores attempt sending a new direct message
+ * request. In such case the cores attempting the new request receive
+ * a busy response from the SPMC. To handle this case a retry loop is
+ * implemented permitting some fairness.
+ */
+ uint32_t trial_loop = 5U;
+ while (trial_loop--) {
+ ffa_ret = cactus_echo_send_cmd(HYP_ID, SP_ID(3), ECHO_VAL3);
+ if ((ffa_func_id(ffa_ret) == FFA_ERROR) &&
+ (ffa_error_code(ffa_ret) == FFA_ERROR_BUSY)) {
+ VERBOSE("%s(%u) trial %u\n", __func__,
+ core_pos, trial_loop);
+ waitms(1);
+ continue;
+ }
+
+ if (is_ffa_direct_response(ffa_ret) == true) {
+ if (cactus_get_response(ffa_ret) != CACTUS_SUCCESS ||
+ cactus_echo_get_val(ffa_ret) != ECHO_VAL3) {
+ ERROR("Echo Failed!\n");
+ ret = TEST_RESULT_FAIL;
+ }
+
+ goto out;
+ }
+ }
+
+ ret = TEST_RESULT_FAIL;
+
+out:
+ return ret;
+}
+
+/*
+ * The secondary core sequentially calls into the Secure and Realm worlds.
+ */
+static test_result_t non_secure_call_secure_and_realm(void)
+{
+ test_result_t result = run_spm_direct_message();
+
+ if (result != TEST_RESULT_SUCCESS) {
+ return result;
+ }
+
+ return realm_multi_cpu_payload_del_undel();
+}
+
+/*
+ * Non-secure world calls into the Secure world synchronously, in parallel
+ * with all other cores in this test.
+ */
+static test_result_t non_secure_call_secure_multi_cpu_sync(void)
+{
+ return wait_then_call(run_spm_direct_message);
+}
+
+/*
+ * Multi CPU testing of delegate and undelegate of granules.
+ * The granules are first randomly initialized to either realm or non-secure
+ * state using init_buffer_del_spm_rmi() and then the function below assigns
+ * NUM_GRANULES to each CPU for delegation or undelegation depending upon
+ * the initial state.
+ */
+static test_result_t realm_multi_cpu_payload_del_undel(void)
+{
+ u_register_t retrmm;
+ unsigned int cpu_node;
+
+ cpu_node = platform_get_core_pos(read_mpidr_el1() & MPID_MASK);
+
+ for (int i = 0; i < NUM_GRANULES; i++) {
+ if (bufferstate[((cpu_node * NUM_GRANULES) + i)] == B_UNDELEGATED) {
+ retrmm = host_rmi_granule_delegate((u_register_t)
+ &bufferdelegate[((cpu_node *
+ NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_DELEGATED;
+ } else {
+ retrmm = host_rmi_granule_undelegate((u_register_t)
+ &bufferdelegate[((cpu_node *
+ NUM_GRANULES) + i) * GRANULE_SIZE]);
+ bufferstate[((cpu_node * NUM_GRANULES) + i)] = B_UNDELEGATED;
+ }
+ if (retrmm != 0UL) {
+ tftf_testcase_printf("Delegate operation returns fail, %lx\n",
+ retrmm);
+ return TEST_RESULT_FAIL;
+ }
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Non-secure world calls into the Realm world synchronously, in parallel
+ * with all other cores in this test.
+ */
+static test_result_t non_secure_call_realm_multi_cpu_sync(void)
+{
+ return wait_then_call(realm_multi_cpu_payload_del_undel);
+}
+
+/*
+ * The NS world communicates with the Secure and Realm worlds in series via
+ * SMC from a single core.
+ */
+test_result_t test_spm_rmm_serial_smc(void)
+{
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int mpidr;
+
+ /**********************************************************************
+ * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+
+ host_rmi_init_cmp_result();
+
+ /*
+ * Randomize the initial state of the RMI granules to realm or non-secure
+ */
+ if (init_buffer_del_spm_rmi() == TEST_RESULT_FAIL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Preparation step:
+ * Find another CPU than the lead CPU and power it on.
+ */
+ mpidr = tftf_find_any_cpu_other_than(lead_mpid);
+ assert(mpidr != INVALID_MPID);
+
+ /*
+ * Run the SPM direct message call and the RMI call in series on a second
+ * core. Wait for the core to power cycle between each call.
+ */
+ for (size_t i = 0; i < MAX_REPEATED_TEST; i++) {
+ /* SPM FF-A direct message call */
+ if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+ (uintptr_t)non_secure_call_secure_and_realm)) {
+ return TEST_RESULT_FAIL;
+ }
+ /* Wait for the target CPU to finish the test execution */
+ wait_for_core_to_turn_off(mpidr);
+ }
+
+ if (reset_buffer_del_spm_rmi() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Done exiting.\n");
+
+ /**********************************************************************
+ * Report register comparison result
+ **********************************************************************/
+ return host_cmp_result();
+}
+
+/*
+ * Test function to let the NS world communicate with the Secure and Realm
+ * worlds in parallel via SMC using multiple cores.
+ */
+test_result_t test_spm_rmm_parallel_smc(void)
+{
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int cpu_node, mpidr;
+
+ /**********************************************************************
+ * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+
+ host_rmi_init_cmp_result();
+
+ /*
+ * Randomize the initial state of the RMI granules to realm or non-secure
+ */
+ if (init_buffer_del_spm_rmi() == TEST_RESULT_FAIL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Main test to run both SPM and RMM or TRP together in parallel
+ */
+ for (int i = 0; i < MAX_REPEATED_TEST; i++) {
+ VERBOSE("Main test(%d) to run both SPM and RMM or\
+ TRP together in parallel...\n", i);
+
+ /* Reinitialize the boot event for all CPUs */
+ for (unsigned int j = 0U; j < PLATFORM_CORE_COUNT; j++) {
+ tftf_init_event(&cpu_booted[j]);
+ }
+
+ /*
+ * Randomize the assignment of the CPUs to either SPM or RMI.
+ */
+ rand_cpu_spm_rmi();
+
+ /*
+ * Run on each CPU the SPM or RMI test function it was randomly assigned.
+ */
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == lead_mpid) {
+ continue;
+ }
+ if (spm_rmi_test(mpidr) == 1) {
+ if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+ (uintptr_t)non_secure_call_secure_multi_cpu_sync)) {
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ if (TEST_RESULT_SUCCESS != run_on_cpu(mpidr,
+ (uintptr_t)non_secure_call_realm_multi_cpu_sync)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ VERBOSE("Waiting for secondary CPUs to turn off ...\n");
+ wait_for_non_lead_cpus();
+ }
+
+ VERBOSE("Done exiting.\n");
+
+ if (reset_buffer_del_spm_rmi() != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /**********************************************************************
+ * Report register comparison result
+ **********************************************************************/
+ return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
new file mode 100644
index 000000000..4774dfc35
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_multiple_rec_tests.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <drivers/arm/arm_gic.h>
+#include <debug.h>
+#include <platform.h>
+#include <plat_topology.h>
+#include <power_management.h>
+#include <psci.h>
+#include <sgi.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_pmu.h>
+#include <host_shared_data.h>
+
+static uint64_t is_secondary_cpu_on;
+static struct realm realm;
+static struct realm realm1;
+
+/*
+ * Test tries to create the maximum number of RECs and
+ * enters all of them from a single CPU.
+ */
+test_result_t host_realm_multi_rec_single_cpu(void)
+{
+ bool ret1, ret2;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ for (unsigned int i = 0; i < MAX_REC_COUNT; i++) {
+ host_shared_data_set_host_val(&realm, i, HOST_ARG1_INDEX, 10U);
+ ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD,
+ RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ break;
+ }
+ }
+
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test creates 3 RECs.
+ * REC0 requests CPU ON for REC1.
+ * Host denies CPU ON for REC1.
+ * Host tries to enter REC1 and fails.
+ * Host re-enters REC0.
+ * REC0 checks that CPU ON was denied.
+ * REC0 requests CPU ON for REC2.
+ * Host denies CPU ON, which should fail as the REC is runnable.
+ * Host allows CPU ON and re-enters REC0.
+ * REC0 checks that ALREADY_ON is returned.
+ */
+test_result_t host_realm_multi_rec_psci_denied(void)
+{
+ bool ret1, ret2;
+ u_register_t ret;
+ unsigned int host_call_result;
+ u_register_t exit_reason;
+ unsigned int rec_num;
+ struct rmi_rec_run *run;
+ /* Create 3 rec Rec 0 and 2 are runnable, Rec 1 in not runnable */
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 3U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_PSCI_DENIED_CMD,
+ RMI_EXIT_PSCI, 0U);
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host did not receive CPU ON request\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num != 1U) {
+ ERROR("Invalid mpidr requested\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+ INFO("Requesting PSCI Complete Status Denied REC %d\n", rec_num);
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_DENIED);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete failed\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+
+ /* Enter rec1, should fail */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 1U);
+ if (ret == RMI_SUCCESS) {
+ ERROR("Rec1 enter should have failed\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+
+ if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+ ERROR("Host did not receive PSCI_AFFINITY_INFO request\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num != 1U) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+
+ INFO("Requesting PSCI Complete Affinity Info REC %d\n", rec_num);
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete failed\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+
+ /* Re-enter REC0 to complete PSCI_AFFINITY_INFO */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host did not receive CPU ON request\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num != 2U) {
+ ERROR("Invalid mpidr requested\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+
+ INFO("Requesting PSCI Complete Status Denied REC %d\n", rec_num);
+ /* PSCI_DENIED should fail as rec2 is RMI_RUNNABLE */
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_DENIED);
+ if (ret == RMI_SUCCESS) {
+ ret1 = false;
+ ERROR("host_rmi_psci_complete should have failed\n");
+ goto destroy_realm;
+ }
+
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("Rec0 re-enter failed\n");
+ ret1 = false;
+ goto destroy_realm;
+ }
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
+/* Lock used to avoid concurrent accesses to the is_secondary_cpu_on counter */
+spinlock_t secondary_cpu_lock;
+
+static test_result_t cpu_on_handler2(void)
+{
+ bool ret;
+
+ spin_lock(&secondary_cpu_lock);
+ is_secondary_cpu_on++;
+ spin_unlock(&secondary_cpu_lock);
+
+ ret = host_enter_realm_execute(&realm, REALM_LOOP_CMD,
+ RMI_EXIT_IRQ, is_secondary_cpu_on);
+ if (!ret) {
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t host_realm_multi_rec_exit_irq(void)
+{
+ bool ret1, ret2;
+ unsigned int rec_count = MAX_REC_COUNT;
+ u_register_t other_mpidr, my_mpidr, ret;
+ int cpu_node;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(rec_count);
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, rec_count)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ is_secondary_cpu_on = 0U;
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ ret1 = host_enter_realm_execute(&realm, REALM_GET_RSI_VERSION, RMI_EXIT_HOST_CALL, 0U);
+ for_each_cpu(cpu_node) {
+ other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (other_mpidr == my_mpidr) {
+ continue;
+ }
+ /* Power on the other CPU */
+ ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler2, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ goto destroy_realm;
+ }
+ }
+
+	INFO("Waiting for all CPUs to come up\n");
+ while (is_secondary_cpu_on != (rec_count - 1U)) {
+ waitms(100U);
+ }
+
+destroy_realm:
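+	/*
+	 * The secondary RECs are still looping in the realm; raise an NS SGI
+	 * targeting each of them so that they exit with RMI_EXIT_IRQ before
+	 * the realm is destroyed.
+	 */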
+ tftf_irq_enable(IRQ_NS_SGI_7, GIC_HIGHEST_NS_PRIORITY);
+ for (unsigned int i = 1U; i < rec_count; i++) {
+		INFO("Raising NS IRQ for REC %u\n", i);
+ host_rec_send_sgi(&realm, IRQ_NS_SGI_7, i);
+ }
+ tftf_irq_disable(IRQ_NS_SGI_7);
+ ret2 = host_destroy_realm(&realm);
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
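+/*
+ * Secondary CPU entry point: claim the next REC index and run
+ * REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD on that REC. The REC is expected to
+ * exit with a PSCI_CPU_OFF request.
+ */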
+static test_result_t cpu_on_handler(void)
+{
+ bool ret;
+ struct rmi_rec_run *run;
+ unsigned int i;
+
+ spin_lock(&secondary_cpu_lock);
+ i = ++is_secondary_cpu_on;
+ spin_unlock(&secondary_cpu_lock);
+ ret = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, i);
+ if (ret) {
+ run = (struct rmi_rec_run *)realm.run[i];
+ if (run->exit.gprs[0] == SMC_PSCI_CPU_OFF) {
+ return TEST_RESULT_SUCCESS;
+ }
+ }
+	ERROR("REC %u failed\n", i);
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * The test creates a realm with the maximum number of RECs.
+ * On receiving a PSCI_CPU_ON call from REC0 for each of the other RECs,
+ * the test completes the PSCI call and re-enters REC0.
+ * Turn ON secondary CPUs up to a maximum of MAX_REC_COUNT.
+ * Each secondary CPU then enters the Realm with a different REC
+ * and executes the test REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD in the Realm payload.
+ * Each REC is expected to exit with PSCI_CPU_OFF as the exit reason.
+ * REC0 then checks that all other CPUs are off, via PSCI_AFFINITY_INFO.
+ * The host completes the PSCI requests.
+ */
+test_result_t host_realm_multi_rec_multiple_cpu(void)
+{
+ bool ret1, ret2;
+ test_result_t ret3 = TEST_RESULT_FAIL;
+ int ret = RMI_ERROR_INPUT;
+ u_register_t rec_num;
+ u_register_t other_mpidr, my_mpidr;
+ struct rmi_rec_run *run;
+ unsigned int host_call_result, i = 0U;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE};
+ u_register_t exit_reason;
+ int cpu_node;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(MAX_REC_COUNT);
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ is_secondary_cpu_on = 0U;
+ init_spinlock(&secondary_cpu_lock);
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, MAX_REC_COUNT);
+ ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, 0U);
+ if (!ret1) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
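+	/*
+	 * REC0 issues one PSCI_CPU_ON per secondary REC. Complete each request
+	 * and re-enter REC0 until it stops exiting with RMI_EXIT_PSCI, i.e.
+	 * until it finally exits with a host call.
+	 */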
+ while (true) {
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ /* Re-enter REC0 complete CPU_ON */
+ ret = host_realm_rec_enter(&realm, &exit_reason,
+ &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_PSCI) {
+ break;
+ }
+ } else {
+ ERROR("host_rmi_psci_complete failed\n");
+ goto destroy_realm;
+ }
+ }
+ if (exit_reason != RMI_EXIT_HOST_CALL || host_call_result != TEST_RESULT_SUCCESS) {
+ ERROR("Realm failed\n");
+ goto destroy_realm;
+ }
+
+ /* Turn on all CPUs */
+ for_each_cpu(cpu_node) {
+ if (i == (MAX_REC_COUNT - 1U)) {
+ break;
+ }
+ other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (other_mpidr == my_mpidr) {
+ continue;
+ }
+
+ /* Power on the other CPU */
+ ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("TFTF CPU ON failed\n");
+ goto destroy_realm;
+ }
+ i++;
+ }
+
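+	/*
+	 * REC0 now polls the other RECs with PSCI_AFFINITY_INFO until they are
+	 * all off. Complete each query and re-enter REC0 until no further
+	 * PSCI_AFFINITY_INFO requests are made.
+	 */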
+ while (true) {
+ /* Re-enter REC0 complete PSCI_AFFINITY_INFO */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("Rec0 re-enter failed\n");
+ goto destroy_realm;
+ }
+ if (run->exit.gprs[0] != SMC_PSCI_AFFINITY_INFO_AARCH64) {
+ break;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+ (unsigned long)PSCI_E_SUCCESS);
+
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete failed\n");
+ goto destroy_realm;
+ }
+ }
+
+ if (ret == RMI_SUCCESS && exit_reason == RMI_EXIT_HOST_CALL) {
+ ret3 = host_call_result;
+ }
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if ((ret != RMI_SUCCESS) || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return ret3;
+}
+
+/*
+ * Test creates 2 realms with multiple RECs.
+ * REC0 of the first realm requests CPU_ON for REC2.
+ * Host calls PSCI_COMPLETE with the wrong REC (REC3), checks for error.
+ * Host calls PSCI_COMPLETE with a REC belonging to a different realm, checks for error.
+ * Host calls PSCI_COMPLETE with the correct REC, checks for success.
+ * Host attempts to execute a REC which is NOT_RUNNABLE, checks for error.
+ */
+test_result_t host_realm_multi_rec_multiple_cpu2(void)
+{
+ bool ret1, ret2;
+ test_result_t ret3 = TEST_RESULT_FAIL;
+ int ret = RMI_ERROR_INPUT;
+ u_register_t rec_num;
+ struct rmi_rec_run *run;
+ unsigned int host_call_result;
+ struct realm realm2;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE, RMI_NOT_RUNNABLE,
+ RMI_NOT_RUNNABLE};
+ u_register_t exit_reason;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_activate_realm_payload(&realm2, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ ret2 = host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ /* Realm to request CPU_ON for rec 2 */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 2U);
+ ret1 = host_enter_realm_execute(&realm, REALM_MULTIPLE_REC_MULTIPLE_CPU_CMD,
+ RMI_EXIT_PSCI, 0U);
+ if (!ret1) {
+ ERROR("Host did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.gprs[0] != SMC_PSCI_CPU_ON_AARCH64) {
+ ERROR("Host2 did not receive CPU ON request\n");
+ goto destroy_realm;
+ }
+ rec_num = host_realm_find_rec_by_mpidr(run->exit.gprs[1], &realm);
+ if (rec_num >= MAX_REC_COUNT) {
+ ERROR("Invalid mpidr requested\n");
+ goto destroy_realm;
+ }
+
+ /* pass wrong target_rec, expect error */
+ ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num + 1U],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+ ret);
+ goto destroy_realm;
+ }
+
+ /* pass wrong target_rec from different realm, expect error */
+ ret = host_rmi_psci_complete(realm.rec[0], realm2.rec[0U],
+ (unsigned long)PSCI_E_SUCCESS);
+ if (ret == RMI_SUCCESS) {
+ ERROR("host_rmi_psci_complete wrong target_rec didn't fail ret=%x\n",
+ ret);
+ goto destroy_realm;
+ }
+
+	ret = host_rmi_psci_complete(realm.rec[0], realm.rec[rec_num],
+			(unsigned long)PSCI_E_SUCCESS);
+	if (ret != RMI_SUCCESS) {
+		ERROR("host_rmi_psci_complete failed\n");
+		goto destroy_realm;
+	}
+
+	/* Try to run REC3 (CPU OFF/NOT_RUNNABLE), expect error */
+ ret = host_realm_rec_enter(&realm, &exit_reason,
+ &host_call_result, 3U);
+
+ if (ret == RMI_SUCCESS) {
+ ERROR("Expected error\n");
+ goto destroy_realm;
+ }
+ ret3 = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ ret1 = host_destroy_realm(&realm);
+ ret2 = host_destroy_realm(&realm2);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): failed destroy=%d, %d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+ return ret3;
+}
+
+/*
+ * Secondary CPU handler: on both realms, check that the PMU counters
+ * available to the REC match those programmed by the host and that the
+ * PMU state is preserved across REC entry/exit.
+ */
+static test_result_t cpu_on_handler_pmu(void)
+{
+ bool ret1;
+ unsigned int i;
+
+ spin_lock(&secondary_cpu_lock);
+ i = is_secondary_cpu_on++;
+ spin_unlock(&secondary_cpu_lock);
+ ret1 = host_enter_realm_execute(&realm, REALM_PMU_COUNTER, RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ return TEST_RESULT_FAIL;
+ }
+ ret1 = host_enter_realm_execute(&realm1, REALM_PMU_COUNTER, RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ return TEST_RESULT_FAIL;
+ }
+ ret1 = host_enter_realm_execute(&realm, REALM_PMU_PRESERVE, RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ return TEST_RESULT_FAIL;
+ }
+ ret1 = host_enter_realm_execute(&realm1, REALM_PMU_PRESERVE, RMI_EXIT_HOST_CALL, i);
+ if (ret1) {
+ return TEST_RESULT_SUCCESS;
+ }
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Test realm creation with more PMU counters than available, expect failure.
+ * Test realm creation with 0 PMU counters:
+ * - expect failure if FEAT_HPMN0 is not supported,
+ * - expect success if FEAT_HPMN0 is supported.
+ * Create 2 Realms, the first with the maximum number of PMU counters and
+ * the second with fewer PMU counters than available.
+ * Schedule multiple RECs on multiple CPUs.
+ * Test that the PMU counters available to each REC match those programmed
+ * by the host and that the PMU counters are preserved for each REC.
+ */
+test_result_t host_realm_pmuv3_mul_rec(void)
+{
+ u_register_t feature_flag;
+ u_register_t rmm_feat_reg0;
+ u_register_t rec_flag[8U] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+	bool ret1 = false, ret2;
+ unsigned int num_cnts, i = 0U;
+ u_register_t other_mpidr, my_mpidr, ret;
+ int cpu_node;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Get Max PMU counter implemented through RMI_FEATURES */
+ if (host_rmi_features(0UL, &rmm_feat_reg0) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_rmi_features");
+ return TEST_RESULT_FAIL;
+ }
+
+ num_cnts = EXTRACT(RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS, rmm_feat_reg0);
+ host_set_pmu_state();
+ is_secondary_cpu_on = 0;
+ my_mpidr = read_mpidr_el1() & MPID_MASK;
+
+ if (num_cnts == 0U) {
+ ERROR("No PMU counters implemented\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, num_cnts + 1U);
+
+ /* Request more PMU counter than total, expect failure */
+ if (host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 1U)) {
+ ERROR("Realm create should have failed\n");
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Request 0 PMU counter */
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, 0U);
+
+ ret1 = host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 1U);
+
+ if (!get_feat_hpmn0_supported()) {
+ if (ret1) {
+ ERROR("Realm create with 0 PMU Counter should have failed\n");
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+ }
+ } else {
+ if (!ret1) {
+ ERROR("Realm create with 0 PMU Counter should not have failed\n");
+ return TEST_RESULT_FAIL;
+ }
+ host_destroy_realm(&realm);
+ }
+
+ /* Test 2 create first realm with max PMU counters */
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, num_cnts);
+
+ /* Prepare realm0, create recs for realm0 later */
+ if (!host_prepare_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, MAX_REC_COUNT)) {
+		goto test_exit;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto test_exit;
+ }
+
+ /* Second realm with less num of PMU counters */
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, num_cnts - 1U);
+
+ if (!host_create_activate_realm_payload(&realm1, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, MAX_REC_COUNT)) {
+ goto test_exit2;
+ }
+ if (!host_create_shared_mem(&realm1, NS_REALM_SHARED_MEM_BASE + NS_REALM_SHARED_MEM_SIZE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto test_exit2;
+ }
+
+ /* create realm0 recs, activate realm0 */
+ if (host_realm_rec_create(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_rec_create");
+ goto test_exit2;
+ }
+
+ if (host_realm_init_ipa_state(&realm, 0U, 0U, 1ULL << 32)
+ != RMI_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_init_ipa_state");
+ goto test_exit2;
+ }
+
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto test_exit2;
+ }
+ INFO("MAX PMU Counter=%u\n", num_cnts);
+
+ /* Pass num of PMU counters programmed to realm */
+ for (unsigned int j = 0U; j < MAX_REC_COUNT; j++) {
+ host_shared_data_set_host_val(&realm, j, HOST_ARG1_INDEX, num_cnts);
+ host_shared_data_set_host_val(&realm1, j, HOST_ARG1_INDEX, num_cnts - 1U);
+ }
+
+	/*
+	 * Enter realm0 REC0 and check that the number of PMU counters available
+	 * is the same as that programmed by the host. Validation is done by the
+	 * Realm, which returns an error if the count does not match.
+	 */
+ ret1 = host_enter_realm_execute(&realm, REALM_PMU_COUNTER, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto test_exit2;
+ }
+
+	/* Enter realm1 REC0, check PMU counters available match those programmed by host */
+ ret1 = host_enter_realm_execute(&realm1, REALM_PMU_COUNTER, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto test_exit2;
+ }
+
+ /* Test if Realm0 rec0 entering/exiting preserves PMU state */
+ ret1 = host_enter_realm_execute(&realm, REALM_PMU_PRESERVE, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto test_exit2;
+ }
+
+ /* Test if Realm1 rec0 entering/exiting preserves PMU state */
+ ret1 = host_enter_realm_execute(&realm1, REALM_PMU_PRESERVE, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto test_exit2;
+ }
+
+ /* Turn on all CPUs */
+ for_each_cpu(cpu_node) {
+ if (i == (MAX_REC_COUNT - 1U)) {
+ break;
+ }
+ other_mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (other_mpidr == my_mpidr) {
+ continue;
+ }
+
+ /* Power on the other CPU */
+ ret = tftf_try_cpu_on(other_mpidr, (uintptr_t)cpu_on_handler_pmu, 0);
+ if (ret != PSCI_E_SUCCESS) {
+ ERROR("TFTF CPU ON failed\n");
+ goto test_exit2;
+ }
+ i++;
+ }
+
+	/* Wait for all CPUs to power up */
+ while (is_secondary_cpu_on != MAX_REC_COUNT - 1U) {
+ waitms(100);
+ }
+
+	/* Wait for all CPUs to power down */
+ for_each_cpu(cpu_node) {
+ other_mpidr = tftf_get_mpidr_from_node(cpu_node) & MPID_MASK;
+ if (other_mpidr == my_mpidr) {
+ continue;
+ }
+ while (tftf_psci_affinity_info(other_mpidr, MPIDR_AFFLVL0) != PSCI_STATE_OFF) {
+ continue;
+ }
+ }
+
+test_exit2:
+ ret2 = host_destroy_realm(&realm1);
+ if (!ret1 || !ret2) {
+ ERROR("%s() enter=%u destroy=%u\n", __func__, ret1, ret2);
+ }
+test_exit:
+ ret2 = host_destroy_realm(&realm);
+ if (!ret1 || !ret2) {
+ ERROR("%s() enter=%u destroy=%u\n", __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_check_pmu_state()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c
new file mode 100644
index 000000000..c19098589
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_simd_tests.c
@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <assert.h>
+#include <arch_features.h>
+#include <debug.h>
+#include <test_helpers.h>
+#include <lib/extensions/fpu.h>
+#include <lib/extensions/sme.h>
+#include <lib/extensions/sve.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_simd.h>
+#include <host_shared_data.h>
+
+#define NS_SVE_OP_ARRAYSIZE 1024U
+#define SVE_TEST_ITERATIONS 50U
+
+/* Min test iteration count for 'host_and_realm_check_simd' test */
+#define TEST_ITERATIONS_MIN (16U)
+
+/* Number of FPU configs: none */
+#define NUM_FPU_CONFIGS (0U)
+
+/* Number of SVE configs: SVE_VL, SVE hint */
+#define NUM_SVE_CONFIGS (2U)
+
+/* Number of SME configs: SVE_SVL, FEAT_FA64, Streaming mode */
+#define NUM_SME_CONFIGS (3U)
+
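+/* NS SVE modes passed as 'ns_sve_mode' to run_sve_vectors_operations() */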
+#define NS_NORMAL_SVE 0x1U
+#define NS_STREAMING_SVE 0x2U
+
+typedef enum security_state {
+ NONSECURE_WORLD = 0U,
+ REALM_WORLD,
+ SECURITY_STATE_MAX
+} security_state_t;
+
+typedef enum {
+ TEST_FPU = 0U,
+ TEST_SVE,
+ TEST_SME,
+} simd_test_t;
+
+static int ns_sve_op_1[NS_SVE_OP_ARRAYSIZE];
+static int ns_sve_op_2[NS_SVE_OP_ARRAYSIZE];
+
+static sve_z_regs_t ns_sve_z_regs_write;
+static sve_z_regs_t ns_sve_z_regs_read;
+
+static sve_p_regs_t ns_sve_p_regs_write;
+static sve_p_regs_t ns_sve_p_regs_read;
+
+static sve_ffr_regs_t ns_sve_ffr_regs_write;
+static sve_ffr_regs_t ns_sve_ffr_regs_read;
+
+static fpu_q_reg_t ns_fpu_q_regs_write[FPU_Q_COUNT];
+static fpu_q_reg_t ns_fpu_q_regs_read[FPU_Q_COUNT];
+
+static fpu_cs_regs_t ns_fpu_cs_regs_write;
+static fpu_cs_regs_t ns_fpu_cs_regs_read;
+
+static struct realm realm;
+
+/* Skip test if SVE is not supported in H/W or in RMI features */
+#define CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(_reg0) \
+ do { \
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED(); \
+ \
+ /* Get RMM support for SVE and its max SVE VL */ \
+ if (host_rmi_features(0UL, &_reg0) != REALM_SUCCESS) { \
+ ERROR("Failed to get RMI feat_reg0\n"); \
+ return TEST_RESULT_FAIL; \
+ } \
+ \
+ /* SVE not supported in RMI features? */ \
+ if ((_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN) == 0UL) { \
+ ERROR("SVE not in RMI features, skipping\n"); \
+ return TEST_RESULT_SKIPPED; \
+ } \
+ } while (false)
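+/*
+ * Note: this is written as a macro rather than a function so that the skip
+ * and fail paths return directly from the calling test function.
+ */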
+
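+/*
+ * Create and activate a single-REC realm, with SVE enabled at the requested
+ * vector length (sve_vq) when sve_en is true, and set up the shared memory
+ * used to exchange data with the realm payload.
+ */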
+static test_result_t host_create_sve_realm_payload(bool sve_en, uint8_t sve_vq)
+{
+ u_register_t feature_flag;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+
+ if (sve_en) {
+ feature_flag = RMI_FEATURE_REGISTER_0_SVE_EN |
+ INPLACE(FEATURE_SVE_VL, sve_vq);
+ } else {
+ feature_flag = 0UL;
+ }
+
+ /* Initialise Realm payload */
+ if (!host_create_activate_realm_payload(&realm,
+ (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Create shared memory between Host and Realm */
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * RMI should report SVE VL in RMI features and it must be the same value as the
+ * max SVE VL seen by the NS world.
+ */
+test_result_t host_check_rmi_reports_proper_sve_vl(void)
+{
+ u_register_t rmi_feat_reg0;
+ uint8_t rmi_sve_vq;
+ uint8_t ns_sve_vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ rmi_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ /*
+ * Configure NS to arch supported max VL and get the value reported
+ * by rdvl
+ */
+ sve_config_vq(SVE_VQ_ARCH_MAX);
+ ns_sve_vq = SVE_VL_TO_VQ(sve_rdvl_1());
+
+ if (rmi_sve_vq != ns_sve_vq) {
+ ERROR("RMI max SVE VL %u bits don't match NS max "
+ "SVE VL %u bits\n", SVE_VQ_TO_BITS(rmi_sve_vq),
+ SVE_VQ_TO_BITS(ns_sve_vq));
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/* Test Realm creation with SVE enabled and run command rdvl */
+test_result_t host_sve_realm_cmd_rdvl(void)
+{
+ host_shared_data_t *sd;
+ struct sve_cmd_rdvl *rl_output;
+ uint8_t sve_vq, rl_max_sve_vq;
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ bool realm_rc;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ rc = host_create_sve_realm_payload(true, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ ERROR("Failed to create Realm with SVE\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (realm_rc != true) {
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ /* Check if rdvl matches the SVE VL created */
+ sd = host_get_shared_structure(&realm, 0U);
+ rl_output = (struct sve_cmd_rdvl *)sd->realm_cmd_output_buffer;
+ rl_max_sve_vq = SVE_VL_TO_VQ(rl_output->rdvl);
+ if (sve_vq == rl_max_sve_vq) {
+ rc = TEST_RESULT_SUCCESS;
+ } else {
+ ERROR("Realm created with max VL: %u bits, but Realm reported "
+ "max VL as: %u bits\n", SVE_VQ_TO_BITS(sve_vq),
+ SVE_VQ_TO_BITS(rl_max_sve_vq));
+ rc = TEST_RESULT_FAIL;
+ }
+
+rm_realm:
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/* Test Realm creation with SVE enabled but with invalid SVE VL */
+test_result_t host_sve_realm_test_invalid_vl(void)
+{
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ uint8_t sve_vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ /*
+ * Pass a sve_vq that is greater than the value supported by RMM
+ * and check whether creating Realm fails
+ */
+ rc = host_create_sve_realm_payload(true, (sve_vq + 1));
+ if (rc == TEST_RESULT_SUCCESS) {
+ ERROR("Error: Realm created with invalid SVE VL %u\n", (sve_vq + 1));
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t _host_sve_realm_check_id_registers(bool sve_en)
+{
+ host_shared_data_t *sd;
+ struct sve_cmd_id_regs *r_regs;
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ bool realm_rc;
+ uint8_t sve_vq = 0U;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (sve_en) {
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+ }
+
+ rc = host_create_sve_realm_payload(sve_en, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_ID_REGISTERS,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ sd = host_get_shared_structure(&realm, 0U);
+ r_regs = (struct sve_cmd_id_regs *)sd->realm_cmd_output_buffer;
+
+ /* Check ID register SVE flags */
+ if (sve_en) {
+ rc = TEST_RESULT_SUCCESS;
+ if (EXTRACT(ID_AA64PFR0_SVE, r_regs->id_aa64pfr0_el1) == 0UL) {
+ ERROR("ID_AA64PFR0_EL1: SVE not enabled\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ if (r_regs->id_aa64zfr0_el1 == 0UL) {
+ ERROR("ID_AA64ZFR0_EL1: No SVE features present\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ } else {
+ rc = TEST_RESULT_SUCCESS;
+ if (EXTRACT(ID_AA64PFR0_SVE, r_regs->id_aa64pfr0_el1) != 0UL) {
+ ERROR("ID_AA64PFR0_EL1: SVE enabled\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ if (r_regs->id_aa64zfr0_el1 != 0UL) {
+ ERROR("ID_AA64ZFR0_EL1: Realm reported non-zero value\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+rm_realm:
+ host_destroy_realm(&realm);
+ return rc;
+}
+
+/* Test ID_AA64PFR0_EL1.SVE and ID_AA64ZFR0_EL1 values in SVE Realm */
+test_result_t host_sve_realm_cmd_id_registers(void)
+{
+ return _host_sve_realm_check_id_registers(true);
+}
+
+/* Test ID_AA64PFR0_EL1.SVE and ID_AA64ZFR0_EL1 values in non SVE Realm */
+test_result_t host_non_sve_realm_cmd_id_registers(void)
+{
+ return _host_sve_realm_check_id_registers(false);
+}
+
+static void print_sve_vl_bitmap(uint32_t vl_bitmap)
+{
+ for (uint8_t vq = 0U; vq <= SVE_VQ_ARCH_MAX; vq++) {
+ if ((vl_bitmap & BIT_32(vq)) != 0U) {
+ INFO("\t%u\n", SVE_VQ_TO_BITS(vq));
+ }
+ }
+}
+
+/* Create SVE Realm and probe all the supported VLs */
+test_result_t host_sve_realm_cmd_probe_vl(void)
+{
+ host_shared_data_t *sd;
+ struct sve_cmd_probe_vl *rl_output;
+ uint32_t vl_bitmap_expected;
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ bool realm_rc;
+ uint8_t sve_vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ rc = host_create_sve_realm_payload(true, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ /*
+ * Configure TFTF with sve_vq and probe all VLs and compare it with
+ * the bitmap returned from Realm
+ */
+ vl_bitmap_expected = sve_probe_vl(sve_vq);
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_PROBE_VL,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ sd = host_get_shared_structure(&realm, 0U);
+ rl_output = (struct sve_cmd_probe_vl *)sd->realm_cmd_output_buffer;
+
+ INFO("Supported SVE vector length in bits (expected):\n");
+ print_sve_vl_bitmap(vl_bitmap_expected);
+
+ INFO("Supported SVE vector length in bits (probed):\n");
+ print_sve_vl_bitmap(rl_output->vl_bitmap);
+
+ if (vl_bitmap_expected == rl_output->vl_bitmap) {
+ rc = TEST_RESULT_SUCCESS;
+ } else {
+ rc = TEST_RESULT_FAIL;
+ }
+
+rm_realm:
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/* Check whether RMM preserves NS ZCR_EL2 register. */
+test_result_t host_sve_realm_check_config_register(void)
+{
+ u_register_t ns_zcr_el2, ns_zcr_el2_cur;
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ uint8_t vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ rc = host_create_sve_realm_payload(true, vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ /*
+ * Configure TFTF from 0 to SVE_VQ_ARCH_MAX, and in each iteration check
+ * if NS ZCR_EL2 is same before and after call to run Realm.
+ */
+ rc = TEST_RESULT_SUCCESS;
+ for (vq = 0U; vq <= SVE_VQ_ARCH_MAX; vq++) {
+ bool realm_rc;
+
+ sve_config_vq(vq);
+ ns_zcr_el2 = read_zcr_el2();
+
+ /* Call Realm to run SVE command */
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ ERROR("Realm command REALM_SVE_RDVL failed\n");
+ rc = TEST_RESULT_FAIL;
+ break;
+ }
+ ns_zcr_el2_cur = read_zcr_el2();
+
+ if (ns_zcr_el2 != ns_zcr_el2_cur) {
+ ERROR("NS ZCR_EL2 expected: 0x%lx, got: 0x%lx\n",
+ ns_zcr_el2, ns_zcr_el2_cur);
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/*
+ * Sends command to Realm to do SVE operations, while NS is also doing SVE
+ * operations.
+ * Returns:
+ * false - On success
+ * true - On failure
+ */
+static bool callback_realm_do_sve(void)
+{
+ return !host_enter_realm_execute(&realm, REALM_SVE_OPS,
+ RMI_EXIT_HOST_CALL, 0U);
+}
+
+/*
+ * Sends command to Realm to do FPU operations, while NS is also doing SVE
+ * operations.
+ * Returns:
+ * false - On success
+ * true - On failure
+ */
+static bool callback_realm_do_fpu(void)
+{
+ return !host_enter_realm_execute(&realm, REALM_REQ_FPU_FILL_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+}
+
+static test_result_t run_sve_vectors_operations(bool realm_sve_en,
+ uint8_t realm_sve_vq,
+ int ns_sve_mode)
+{
+ bool (*realm_callback)(void);
+ test_result_t rc;
+ bool cb_err;
+ unsigned int i;
+ int val;
+
+ rc = host_create_sve_realm_payload(realm_sve_en, realm_sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+	/* Get a random value to do sve_subtract */
+ val = rand();
+ for (i = 0U; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ ns_sve_op_1[i] = val - i;
+ ns_sve_op_2[i] = 1;
+ }
+
+ if (realm_sve_en) {
+ realm_callback = callback_realm_do_sve;
+ } else {
+ realm_callback = callback_realm_do_fpu;
+ }
+
+ for (i = 0U; i < SVE_TEST_ITERATIONS; i++) {
+ /* Config NS world with random SVE VL or SVE SVL */
+ if (ns_sve_mode == NS_NORMAL_SVE) {
+ sve_config_vq(SVE_GET_RANDOM_VQ);
+ } else {
+ sme_config_svq(SME_GET_RANDOM_SVQ);
+ }
+
+ /* Perform SVE operations with intermittent calls to Realm */
+ cb_err = sve_subtract_arrays_interleaved(ns_sve_op_1,
+ ns_sve_op_1,
+ ns_sve_op_2,
+ NS_SVE_OP_ARRAYSIZE,
+ realm_callback);
+ if (cb_err) {
+ ERROR("Callback to realm failed\n");
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+ }
+
+ /* Check result of SVE operations. */
+ rc = TEST_RESULT_SUCCESS;
+
+ for (i = 0U; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ if (ns_sve_op_1[i] != (val - i - SVE_TEST_ITERATIONS)) {
+ ERROR("%s op failed at idx: %u, expected: 0x%x received:"
+ " 0x%x\n", (ns_sve_mode == NS_NORMAL_SVE) ?
+			      "SVE" : "Streaming SVE", i,
+ (val - i - SVE_TEST_ITERATIONS), ns_sve_op_1[i]);
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+rm_realm:
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/*
+ * Intermittently switch to Realm while NS is doing SVE ops in Normal
+ * SVE mode.
+ *
+ * This testcase runs for SVE only config or SVE + SME config
+ */
+test_result_t host_sve_realm_check_vectors_operations(void)
+{
+ u_register_t rmi_feat_reg0;
+ uint8_t realm_sve_vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ realm_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ /* Run SVE operations in Normal SVE mode */
+ return run_sve_vectors_operations(true, realm_sve_vq, NS_NORMAL_SVE);
+}
+
+/*
+ * Intermittently switch to Realm while NS is doing SVE ops in Streaming
+ * SVE mode
+ *
+ * This testcase runs for SME only config or SVE + SME config
+ */
+test_result_t host_sve_realm_check_streaming_vectors_operations(void)
+{
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ uint8_t realm_sve_vq;
+ bool realm_sve_en;
+
+ SKIP_TEST_IF_SME_NOT_SUPPORTED();
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (is_armv8_2_sve_present()) {
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+ realm_sve_en = true;
+ realm_sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL,
+ rmi_feat_reg0);
+ } else {
+		realm_sve_en = false;
+		realm_sve_vq = 0U;
+ }
+
+ /* Enter Streaming SVE mode */
+ sme_smstart(SMSTART_SM);
+
+ /* Run SVE operations in Streaming SVE mode */
+ rc = run_sve_vectors_operations(realm_sve_en, realm_sve_vq,
+ NS_STREAMING_SVE);
+
+ /* Exit Streaming SVE mode */
+ sme_smstop(SMSTOP_SM);
+
+ return rc;
+}
+
+/*
+ * Check if RMM leaks Realm SVE registers.
+ * This test is skipped if the supported max VQ is 128 bits, as we won't be able
+ * to run NS and Realm context with lower and higher VQ respectively.
+ * This test performs the following steps:
+ *
+ * 1. Set NS SVE VQ to max and write known pattern
+ * 2. NS programs ZCR_EL2 with VQ as 0 (128 bits).
+ * 3. Create Realm with max VQ (higher than NS SVE VQ).
+ * 4. Call Realm to fill in Z registers
+ * 5. Once Realm returns, NS sets ZCR_EL2 with max VQ and reads the Z registers
+ * 6. The upper bits of Z registers must be either 0 or the old values filled by
+ * NS world at step 1.
+ */
+test_result_t host_sve_realm_check_vectors_leaked(void)
+{
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ uint64_t bitmap;
+ bool realm_rc;
+ uint8_t sve_vq;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ /* Skip test if the supported max VQ is 128 bits */
+ if (sve_vq == SVE_VQ_ARCH_MIN) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* 1. Set NS SVE VQ to max and write known pattern */
+ sve_config_vq(sve_vq);
+ (void)memset((void *)&ns_sve_z_regs_write, 0xAA,
+ SVE_VQ_TO_BYTES(sve_vq) * SVE_NUM_VECTORS);
+ sve_z_regs_write(&ns_sve_z_regs_write);
+
+ /* 2. NS programs ZCR_EL2 with VQ as 0 */
+ sve_config_vq(SVE_VQ_ARCH_MIN);
+
+ /* 3. Create Realm with max VQ (higher than NS SVE VQ) */
+ rc = host_create_sve_realm_payload(true, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ /* 4. Call Realm to fill in Z registers */
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_FILL_REGS,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ /* 5. NS sets ZCR_EL2 with max VQ and reads the Z registers */
+ sve_config_vq(sve_vq);
+ sve_z_regs_read(&ns_sve_z_regs_read);
+
+ /*
+ * 6. The upper bits in Z vectors (sve_vq - SVE_VQ_ARCH_MIN) must
+ * be either 0 or the old values filled by NS world.
+ * TODO: check if upper bits are zero
+ */
+ bitmap = sve_z_regs_compare(&ns_sve_z_regs_write, &ns_sve_z_regs_read);
+ if (bitmap != 0UL) {
+ ERROR("SVE Z regs compare failed (bitmap: 0x%016llx)\n",
+ bitmap);
+ rc = TEST_RESULT_FAIL;
+ } else {
+ rc = TEST_RESULT_SUCCESS;
+ }
+
+rm_realm:
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/*
+ * Create a non SVE Realm and try to access SVE, the Realm must receive
+ * undefined abort.
+ */
+test_result_t host_non_sve_realm_check_undef_abort(void)
+{
+ test_result_t rc;
+ bool realm_rc;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+ rc = host_create_sve_realm_payload(false, 0);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_UNDEF_ABORT,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ ERROR("Realm didn't receive undefined abort\n");
+ rc = TEST_RESULT_FAIL;
+ } else {
+ rc = TEST_RESULT_SUCCESS;
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/* Generate random values and write it to SVE Z, P and FFR registers */
+static void ns_sve_write_rand(void)
+{
+ bool has_ffr = true;
+
+ if (is_feat_sme_supported() && sme_smstat_sm() &&
+ !sme_feat_fa64_enabled()) {
+ has_ffr = false;
+ }
+
+ sve_z_regs_write_rand(&ns_sve_z_regs_write);
+ sve_p_regs_write_rand(&ns_sve_p_regs_write);
+ if (has_ffr) {
+ sve_ffr_regs_write_rand(&ns_sve_ffr_regs_write);
+ }
+}
+
+/* Read SVE Z, P and FFR registers and compare them with the last written values */
+static test_result_t ns_sve_read_and_compare(void)
+{
+ test_result_t rc = TEST_RESULT_SUCCESS;
+ uint64_t bitmap;
+ bool has_ffr = true;
+
+ if (is_feat_sme_supported() && sme_smstat_sm() &&
+ !sme_feat_fa64_enabled()) {
+ has_ffr = false;
+ }
+
+ /* Clear old state */
+ memset((void *)&ns_sve_z_regs_read, 0, sizeof(ns_sve_z_regs_read));
+ memset((void *)&ns_sve_p_regs_read, 0, sizeof(ns_sve_p_regs_read));
+ memset((void *)&ns_sve_ffr_regs_read, 0, sizeof(ns_sve_ffr_regs_read));
+
+ /* Read Z, P, FFR registers to compare it with the last written values */
+ sve_z_regs_read(&ns_sve_z_regs_read);
+ sve_p_regs_read(&ns_sve_p_regs_read);
+ if (has_ffr) {
+ sve_ffr_regs_read(&ns_sve_ffr_regs_read);
+ }
+
+ bitmap = sve_z_regs_compare(&ns_sve_z_regs_write, &ns_sve_z_regs_read);
+ if (bitmap != 0UL) {
+ ERROR("SVE Z regs compare failed (bitmap: 0x%016llx)\n",
+ bitmap);
+ rc = TEST_RESULT_FAIL;
+ }
+
+ bitmap = sve_p_regs_compare(&ns_sve_p_regs_write, &ns_sve_p_regs_read);
+ if (bitmap != 0UL) {
+ ERROR("SVE P regs compare failed (bitmap: 0x%016llx)\n",
+ bitmap);
+ rc = TEST_RESULT_FAIL;
+ }
+
+ if (has_ffr) {
+ bitmap = sve_ffr_regs_compare(&ns_sve_ffr_regs_write,
+ &ns_sve_ffr_regs_read);
+ if (bitmap != 0) {
+ ERROR("SVE FFR regs compare failed "
+ "(bitmap: 0x%016llx)\n", bitmap);
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Generate random values and write them to Streaming SVE Z, P and FFR registers.
+ */
+static void ns_sme_write_rand(void)
+{
+ /*
+ * TODO: more SME specific registers like ZA, ZT0 can be included later.
+ */
+
+ /* Fill SVE registers in normal or streaming SVE mode */
+ ns_sve_write_rand();
+}
+
+/*
+ * Read streaming SVE Z, P and FFR registers and compare them with the last
+ * written values
+ */
+static test_result_t ns_sme_read_and_compare(void)
+{
+ /*
+ * TODO: more SME specific registers like ZA, ZT0 can be included later.
+ */
+
+ /* Compares SVE registers in normal or streaming SVE mode */
+ return ns_sve_read_and_compare();
+}
+
+static char *simd_type_to_str(simd_test_t type)
+{
+ if (type == TEST_FPU) {
+ return "FPU";
+ } else if (type == TEST_SVE) {
+ return "SVE";
+ } else if (type == TEST_SME) {
+ return "SME";
+ } else {
+ return "UNKNOWN";
+ }
+}
+
+static void ns_simd_print_cmd_config(bool cmd, simd_test_t type)
+{
+ char __unused *tstr = simd_type_to_str(type);
+ char __unused *cstr = cmd ? "write rand" : "read and compare";
+
+ if (type == TEST_SME) {
+ if (sme_smstat_sm()) {
+ INFO("TFTF: NS [%s] %s. Config: smcr: 0x%llx, SM: on\n",
+ tstr, cstr, (uint64_t)read_smcr_el2());
+ } else {
+ INFO("TFTF: NS [%s] %s. Config: smcr: 0x%llx, "
+ "zcr: 0x%llx sve_hint: %d SM: off\n", tstr, cstr,
+ (uint64_t)read_smcr_el2(),
+ (uint64_t)sve_read_zcr_elx(),
+ tftf_smc_get_sve_hint());
+ }
+ } else if (type == TEST_SVE) {
+ INFO("TFTF: NS [%s] %s. Config: zcr: 0x%llx, sve_hint: %d\n",
+ tstr, cstr, (uint64_t)sve_read_zcr_elx(),
+ tftf_smc_get_sve_hint());
+ } else {
+ INFO("TFTF: NS [%s] %s\n", tstr, cstr);
+ }
+}
+
+/*
+ * Randomly select TEST_SME or TEST_FPU. For TEST_SME, randomly select below
+ * configurations:
+ * - enable/disable streaming mode
+ * For streaming mode:
+ * - enable or disable FA64
+ * - select random streaming vector length
+ * For normal SVE mode:
+ * - select random normal SVE vector length
+ */
+static simd_test_t ns_sme_select_random_config(void)
+{
+ simd_test_t type;
+ static unsigned int counter;
+
+ /* Use a static counter to mostly select TEST_SME case. */
+ if ((counter % 8U) != 0) {
+ /* Use counter to toggle between Streaming mode on or off */
+ if (is_armv8_2_sve_present() && ((counter % 2U) != 0)) {
+ sme_smstop(SMSTOP_SM);
+ sve_config_vq(SVE_GET_RANDOM_VQ);
+
+ if ((counter % 3U) != 0) {
+ tftf_smc_set_sve_hint(true);
+ } else {
+ tftf_smc_set_sve_hint(false);
+ }
+ } else {
+ sme_smstart(SMSTART_SM);
+ sme_config_svq(SME_GET_RANDOM_SVQ);
+
+ if ((counter % 3U) != 0) {
+ sme_enable_fa64();
+ } else {
+ sme_disable_fa64();
+ }
+ }
+ type = TEST_SME;
+ } else {
+ type = TEST_FPU;
+ }
+ counter++;
+
+ return type;
+}
+
+/*
+ * Randomly select TEST_SVE or TEST_FPU. For TEST_SVE, configure zcr_el2 with
+ * random vector length and randomly enable or disable SMC SVE hint bit.
+ */
+static simd_test_t ns_sve_select_random_config(void)
+{
+ simd_test_t type;
+ static unsigned int counter;
+
+ /* Use a static counter to mostly select TEST_SVE case. */
+ if ((counter % 4U) != 0) {
+ sve_config_vq(SVE_GET_RANDOM_VQ);
+
+ if ((counter % 2U) != 0) {
+ tftf_smc_set_sve_hint(true);
+ } else {
+ tftf_smc_set_sve_hint(false);
+ }
+
+ type = TEST_SVE;
+ } else {
+ type = TEST_FPU;
+ }
+ counter++;
+
+ return type;
+}
+
+/*
+ * Configure NS world SIMD. Randomly choose to test SVE or FPU registers if
+ * system supports SVE.
+ *
+ * Returns either TEST_FPU or TEST_SVE or TEST_SME
+ */
+static simd_test_t ns_simd_select_random_config(void)
+{
+ simd_test_t type;
+
+ /* cleanup old config for SME */
+ if (is_feat_sme_supported()) {
+ sme_smstop(SMSTOP_SM);
+ sme_enable_fa64();
+ }
+
+ /* Cleanup old config for SVE */
+ if (is_armv8_2_sve_present()) {
+ tftf_smc_set_sve_hint(false);
+ }
+
+ if (is_armv8_2_sve_present() && is_feat_sme_supported()) {
+ if (rand() % 2) {
+ type = ns_sme_select_random_config();
+ } else {
+ type = ns_sve_select_random_config();
+ }
+ } else if (is_feat_sme_supported()) {
+ type = ns_sme_select_random_config();
+ } else if (is_armv8_2_sve_present()) {
+ type = ns_sve_select_random_config();
+ } else {
+ type = TEST_FPU;
+ }
+
+ return type;
+}
+
+/* Select random NS SIMD config and write random values to its registers */
+static simd_test_t ns_simd_write_rand(void)
+{
+ simd_test_t type;
+
+ type = ns_simd_select_random_config();
+
+ ns_simd_print_cmd_config(true, type);
+
+ if (type == TEST_SME) {
+ ns_sme_write_rand();
+ } else if (type == TEST_SVE) {
+ ns_sve_write_rand();
+ } else {
+ fpu_q_regs_write_rand(ns_fpu_q_regs_write);
+ }
+
+ /* fpcr, fpsr common to all configs */
+ fpu_cs_regs_write_rand(&ns_fpu_cs_regs_write);
+
+ return type;
+}
+
+/* Read and compare the NS SIMD registers with the last written values */
+static test_result_t ns_simd_read_and_compare(simd_test_t type)
+{
+ test_result_t rc = TEST_RESULT_SUCCESS;
+
+ ns_simd_print_cmd_config(false, type);
+
+ if (type == TEST_SME) {
+ rc = ns_sme_read_and_compare();
+ } else if (type == TEST_SVE) {
+ rc = ns_sve_read_and_compare();
+ } else {
+ fpu_q_regs_read(ns_fpu_q_regs_read);
+ if (fpu_q_regs_compare(ns_fpu_q_regs_write,
+ ns_fpu_q_regs_read)) {
+ ERROR("FPU Q registers compare failed\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+ /* fpcr, fpsr common to all configs */
+ fpu_cs_regs_read(&ns_fpu_cs_regs_read);
+ if (fpu_cs_regs_compare(&ns_fpu_cs_regs_write, &ns_fpu_cs_regs_read)) {
+ ERROR("FPCR/FPSR registers compare failed\n");
+ rc = TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/* Select random Realm SIMD config and write random values to its registers */
+static simd_test_t rl_simd_write_rand(bool rl_sve_en)
+{
+ enum realm_cmd rl_fill_cmd;
+ simd_test_t type;
+ bool __unused rc;
+
+ /* Select random commands to test. SVE or FPU registers in Realm */
+ if (rl_sve_en && (rand() % 2)) {
+ type = TEST_SVE;
+ } else {
+ type = TEST_FPU;
+ }
+
+ INFO("TFTF: RL [%s] write random\n", simd_type_to_str(type));
+ if (type == TEST_SVE) {
+ rl_fill_cmd = REALM_SVE_FILL_REGS;
+ } else {
+ rl_fill_cmd = REALM_REQ_FPU_FILL_CMD;
+ }
+
+ rc = host_enter_realm_execute(&realm, rl_fill_cmd, RMI_EXIT_HOST_CALL, 0U);
+ assert(rc);
+
+ return type;
+}
+
+/* Read and compare the Realm SIMD registers with the last written values */
+static bool rl_simd_read_and_compare(simd_test_t type)
+{
+ enum realm_cmd rl_cmp_cmd;
+
+ INFO("TFTF: RL [%s] read and compare\n", simd_type_to_str(type));
+ if (type == TEST_SVE) {
+ rl_cmp_cmd = REALM_SVE_CMP_REGS;
+ } else {
+ rl_cmp_cmd = REALM_REQ_FPU_CMP_CMD;
+ }
+
+ return host_enter_realm_execute(&realm, rl_cmp_cmd, RMI_EXIT_HOST_CALL,
+ 0U);
+}
+
+/*
+ * This test case verifies whether various SIMD related registers like Q[0-31],
+ * FPCR, FPSR, Z[0-31], P[0-15], FFR are preserved by RMM during world switch
+ * between NS world and Realm world.
+ *
+ * Randomly verify FPU registers or SVE registers if the system supports SVE.
+ * Within SVE, randomly configure SVE vector length.
+ *
+ * This testcase runs on below configs:
+ * - SVE only
+ * - SME only
+ * - with SVE and SME
+ * - without SVE and SME
+ */
+test_result_t host_and_realm_check_simd(void)
+{
+ u_register_t rmi_feat_reg0;
+ test_result_t rc;
+ uint8_t sve_vq;
+ bool sve_en;
+ security_state_t sec_state;
+ simd_test_t ns_simd_type, rl_simd_type;
+ unsigned int test_iterations;
+ unsigned int num_simd_types;
+ unsigned int num_simd_configs;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (host_rmi_features(0UL, &rmi_feat_reg0) != REALM_SUCCESS) {
+ ERROR("Failed to get RMI feat_reg0\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ sve_en = rmi_feat_reg0 & RMI_FEATURE_REGISTER_0_SVE_EN;
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+
+ /* Create Realm with SVE enabled if RMI features supports it */
+ INFO("TFTF: create realm sve_en/sve_vq: %d/%d\n", sve_en, sve_vq);
+ rc = host_create_sve_realm_payload(sve_en, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ /*
+ * Randomly select and configure NS simd context to test. And fill it
+ * with random values.
+ */
+ ns_simd_type = ns_simd_write_rand();
+
+ /*
+ * Randomly select and configure Realm simd context to test. Enter realm
+ * and fill simd context with random values.
+ */
+ rl_simd_type = rl_simd_write_rand(sve_en);
+ sec_state = REALM_WORLD;
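+	/*
+	 * sec_state starts as REALM_WORLD so that the first loop iteration
+	 * below switches back to NS and verifies the NS SIMD context written
+	 * above.
+	 */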
+
+ /*
+ * Find out test iterations based on if SVE is enabled and the number of
+ * configurations available in the SVE.
+ */
+
+ /* FPU is always available */
+ num_simd_types = 1U;
+ num_simd_configs = NUM_FPU_CONFIGS;
+
+ if (is_armv8_2_sve_present()) {
+ num_simd_types += 1;
+ num_simd_configs += NUM_SVE_CONFIGS;
+ }
+
+ if (is_feat_sme_supported()) {
+ num_simd_types += 1;
+ num_simd_configs += NUM_SME_CONFIGS;
+ }
+
+ if (num_simd_configs) {
+ test_iterations = TEST_ITERATIONS_MIN * num_simd_types *
+ num_simd_configs;
+ } else {
+ test_iterations = TEST_ITERATIONS_MIN * num_simd_types;
+ }
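+	/*
+	 * For example, with both SVE and SME present this gives
+	 * 16 * 3 * (0 + 2 + 3) = 240 iterations.
+	 */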
+
+ for (uint32_t i = 0U; i < test_iterations; i++) {
+ if (sec_state == NONSECURE_WORLD) {
+ sec_state = REALM_WORLD;
+ } else {
+ sec_state = NONSECURE_WORLD;
+ }
+
+ switch (sec_state) {
+ case NONSECURE_WORLD:
+ /*
+ * Read NS simd context and compare it with last written
+ * context.
+ */
+ rc = ns_simd_read_and_compare(ns_simd_type);
+ if (rc != TEST_RESULT_SUCCESS) {
+ goto rm_realm;
+ }
+
+ /*
+ * Randomly select and configure NS simd context. And
+ * fill it with random values for the next compare.
+ */
+ ns_simd_type = ns_simd_write_rand();
+ break;
+ case REALM_WORLD:
+ /*
+ * Enter Realm and read the simd context and compare it
+ * with last written context.
+ */
+ if (!rl_simd_read_and_compare(rl_simd_type)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ /*
+ * Randomly select and configure Realm simd context to
+ * test. Enter realm and fill simd context with random
+ * values for the next compare.
+ */
+ rl_simd_type = rl_simd_write_rand(sve_en);
+ break;
+ default:
+ break;
+ }
+ }
+
+ rc = TEST_RESULT_SUCCESS;
+rm_realm:
+ /* Cleanup old config */
+ if (is_feat_sme_supported()) {
+ sme_smstop(SMSTOP_SM);
+ sme_enable_fa64();
+ }
+
+ /* Cleanup old config */
+ if (is_armv8_2_sve_present()) {
+ tftf_smc_set_sve_hint(false);
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
+
+/*
+ * Create a Realm and check SME specific ID registers. Realm must report SME
+ * not present in ID_AA64PFR1_EL1 and no SME features present in
+ * ID_AA64SMFR0_EL1
+ */
+test_result_t host_realm_check_sme_id_registers(void)
+{
+ host_shared_data_t *sd;
+ struct sme_cmd_id_regs *r_regs;
+ test_result_t rc;
+ bool realm_rc;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ rc = host_create_sve_realm_payload(false, 0);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SME_ID_REGISTERS,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ rc = TEST_RESULT_FAIL;
+ goto rm_realm;
+ }
+
+ sd = host_get_shared_structure(&realm, 0U);
+ r_regs = (struct sme_cmd_id_regs *)sd->realm_cmd_output_buffer;
+
+ /* Check ID register SME flags */
+ rc = TEST_RESULT_SUCCESS;
+ if (EXTRACT(ID_AA64PFR1_EL1_SME, r_regs->id_aa64pfr1_el1) >=
+ ID_AA64PFR1_EL1_SME_SUPPORTED) {
+ ERROR("ID_AA64PFR1_EL1: SME enabled\n");
+ rc = TEST_RESULT_FAIL;
+ }
+ if (r_regs->id_aa64smfr0_el1 != 0UL) {
+ ERROR("ID_AA64SMFR0_EL1: Realm reported non-zero value\n");
+ rc = TEST_RESULT_FAIL;
+ }
+
+rm_realm:
+ host_destroy_realm(&realm);
+ return rc;
+}
+
+/*
+ * Create a Realm and try to access SME, the Realm must receive undefined abort.
+ */
+test_result_t host_realm_check_sme_undef_abort(void)
+{
+ test_result_t rc;
+ bool realm_rc;
+
+ SKIP_TEST_IF_SME_NOT_SUPPORTED();
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ rc = host_create_sve_realm_payload(false, 0);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ realm_rc = host_enter_realm_execute(&realm, REALM_SME_UNDEF_ABORT,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!realm_rc) {
+ ERROR("Realm didn't receive undefined abort\n");
+ rc = TEST_RESULT_FAIL;
+ } else {
+ rc = TEST_RESULT_SUCCESS;
+ }
+
+ host_destroy_realm(&realm);
+ return rc;
+}
+
+/*
+ * Check whether RMM preserves NS SME config values and flags
+ * 1. SMCR_EL2.LEN field
+ * 2. SMCR_EL2.FA64 flag
+ * 3. Streaming SVE mode status
+ *
+ * This test case runs for SVE + SME config and SME only config and skipped for
+ * non SME config.
+ */
+test_result_t host_realm_check_sme_configs(void)
+{
+ u_register_t ns_smcr_el2, ns_smcr_el2_cur;
+ u_register_t rmi_feat_reg0;
+ bool ssve_mode;
+ test_result_t rc;
+ uint8_t sve_vq;
+ uint8_t sme_svq;
+ bool sve_en;
+
+ SKIP_TEST_IF_SME_NOT_SUPPORTED();
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (is_armv8_2_sve_present()) {
+ CHECK_SVE_SUPPORT_IN_HW_AND_IN_RMI(rmi_feat_reg0);
+ sve_en = true;
+ sve_vq = EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, rmi_feat_reg0);
+ } else {
+ sve_en = false;
+ sve_vq = 0;
+ }
+
+ rc = host_create_sve_realm_payload(sve_en, sve_vq);
+ if (rc != TEST_RESULT_SUCCESS) {
+ return rc;
+ }
+
+ /*
+ * Configure TFTF from 0 to SME_SVQ_ARCH_MAX, and in each iteration
+ * randomly enable or disable FA64 and Streaming SVE mode. After calling
+ * Realm, check the NS SME configuration status.
+ */
+ rc = TEST_RESULT_SUCCESS;
+ for (sme_svq = 0U; sme_svq <= SME_SVQ_ARCH_MAX; sme_svq++) {
+ bool realm_rc;
+
+ sme_config_svq(sme_svq);
+
+ /* randomly enable or disable FEAT_SME_FA64 */
+ if (sme_svq % 2) {
+ sme_enable_fa64();
+ sme_smstart(SMSTART_SM);
+ ssve_mode = true;
+ } else {
+ sme_disable_fa64();
+ sme_smstop(SMSTOP_SM);
+ ssve_mode = false;
+ }
+
+ ns_smcr_el2 = read_smcr_el2();
+
+ /*
+ * If SVE is supported then we would have created a Realm with
+ * SVE support, so run SVE command else run FPU command
+ */
+ if (sve_en) {
+ realm_rc = host_enter_realm_execute(&realm, REALM_SVE_RDVL,
+ RMI_EXIT_HOST_CALL,
+ 0U);
+ } else {
+ realm_rc = host_enter_realm_execute(&realm,
+ REALM_REQ_FPU_FILL_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ }
+
+ if (!realm_rc) {
+			ERROR("Realm command failed\n");
+ rc = TEST_RESULT_FAIL;
+ break;
+ }
+ ns_smcr_el2_cur = read_smcr_el2();
+
+ if (ns_smcr_el2 != ns_smcr_el2_cur) {
+ ERROR("NS SMCR_EL2 expected: 0x%lx, got: 0x%lx\n",
+ ns_smcr_el2, ns_smcr_el2_cur);
+ rc = TEST_RESULT_FAIL;
+ }
+
+ if (sme_smstat_sm() != ssve_mode) {
+ if (ssve_mode) {
+ ERROR("NS Streaming SVE mode is disabled\n");
+ } else {
+ ERROR("NS Streaming SVE mode is enabled\n");
+ }
+
+ rc = TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Exit Streaming SVE mode if test case enabled it */
+ if (ssve_mode) {
+ sme_smstop(SMSTOP_SM);
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return rc;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c b/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c
new file mode 100644
index 000000000..1ccf3175a
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_payload_tests.c
@@ -0,0 +1,1824 @@
+/*
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <assert.h>
+#include <arch_features.h>
+#include <debug.h>
+#include <irq.h>
+#include <drivers/arm/arm_gic.h>
+#include <drivers/arm/gic_v3.h>
+#include <heap/page_alloc.h>
+#include <pauth.h>
+#include <test_helpers.h>
+
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_realm_pmu.h>
+#include <host_shared_data.h>
+
+#define SLEEP_TIME_MS 20U
+
+extern const char *rmi_exit[];
+
+#if ENABLE_PAUTH
+static uint128_t pauth_keys_before[NUM_KEYS];
+static uint128_t pauth_keys_after[NUM_KEYS];
+#endif
+
+/*
+ * @Test_Aim@ Test realm payload creation, execution and destruction iteratively
+ */
+test_result_t host_test_realm_create_enter(void)
+{
+ bool ret1, ret2;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ for (unsigned int i = 0U; i < 5U; i++) {
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * @Test_Aim@ Test realm payload creation and execution
+ */
+test_result_t host_test_realm_rsi_version(void)
+{
+ bool ret1, ret2;
+ u_register_t rec_flag[] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, REALM_GET_RSI_VERSION, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * @Test_Aim@ Create realm with multiple rec
+ * Test PAuth registers are preserved for each rec
+ */
+test_result_t host_realm_enable_pauth(void)
+{
+#if ENABLE_PAUTH == 0
+ return TEST_RESULT_SKIPPED;
+#else
+ bool ret1, ret2;
+ u_register_t rec_flag[MAX_REC_COUNT] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ pauth_test_lib_fill_regs_and_template(pauth_keys_before);
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ for (unsigned int i = 0U; i < MAX_REC_COUNT; i++) {
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_SET_CMD,
+ RMI_EXIT_HOST_CALL, i);
+
+ if (!ret1) {
+ ERROR("Pauth set cmd failed\n");
+ break;
+ }
+ /* Re-enter Realm to compare PAuth registers. */
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_CHECK_CMD,
+ RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ ERROR("Pauth check cmd failed\n");
+ break;
+ }
+ }
+
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check if PAuth keys are preserved. */
+ if (!pauth_test_lib_compare_template(pauth_keys_before, pauth_keys_after)) {
+ ERROR("%s(): NS PAuth keys not preserved\n",
+ __func__);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+#endif
+}
+
+/*
+ * @Test_Aim@ Test PAuth fault in Realm
+ */
+test_result_t host_realm_pauth_fault(void)
+{
+#if ENABLE_PAUTH == 0
+ return TEST_RESULT_SKIPPED;
+#else
+ bool ret1, ret2;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, REALM_PAUTH_FAULT, RMI_EXIT_HOST_CALL, 0U);
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret1) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+#endif
+}
+
+/*
+ * This function is called on REC exit due to IRQ.
+ * By checking the Realm PMU state in the RecExit object, this function
+ * detects if the exit was caused by PMU interrupt. In that
+ * case it disables physical PMU interrupt and sets virtual
+ * PMU interrupt pending by writing to gicv3_lrs attribute
+ * of RecEntry object and re-enters the Realm.
+ *
+ * @return true in case of PMU interrupt, false otherwise.
+ */
+static bool host_realm_handle_irq_exit(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run = (struct rmi_rec_run *)realm_ptr->run[rec_num];
+
+ /* Check PMU overflow status */
+ if (run->exit.pmu_ovf_status == RMI_PMU_OVERFLOW_ACTIVE) {
+ unsigned int host_call_result;
+ u_register_t exit_reason, retrmm;
+ int ret;
+
+ tftf_irq_disable(PMU_PPI);
+ ret = tftf_irq_unregister_handler(PMU_PPI);
+ if (ret != 0) {
+ ERROR("Failed to %sregister IRQ handler\n", "un");
+ return false;
+ }
+
+ /* Inject PMU virtual interrupt */
+ run->entry.gicv3_lrs[0] =
+ ICH_LRn_EL2_STATE_Pending | ICH_LRn_EL2_Group_1 |
+ (PMU_VIRQ << ICH_LRn_EL2_vINTID_SHIFT);
+
+ /* Re-enter Realm */
+ INFO("Re-entering Realm with vIRQ %lu pending\n", PMU_VIRQ);
+
+ retrmm = host_realm_rec_enter(realm_ptr, &exit_reason,
+ &host_call_result, rec_num);
+ if ((retrmm == REALM_SUCCESS) &&
+ (exit_reason == RMI_EXIT_HOST_CALL) &&
+ (host_call_result == TEST_RESULT_SUCCESS)) {
+ return true;
+ }
+
+ ERROR("%s() failed, ret=%lx host_call_result %u\n",
+ "host_realm_rec_enter", retrmm, host_call_result);
+ }
+ return false;
+}
+
+/*
+ * @Test_Aim@ Test realm PMU
+ *
+ * This function tests PMU functionality in Realm
+ *
+ * @cmd: PMU test number
+ * @return test result
+ */
+static test_result_t host_test_realm_pmuv3(uint8_t cmd)
+{
+ struct realm realm;
+ u_register_t feature_flag;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ bool ret1, ret2;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ host_set_pmu_state();
+
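+	/*
+	 * Enable PMU for the Realm and request the maximum number of PMU
+	 * counters (all bits set in the PMU_NUM_CTRS field).
+	 */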
+ feature_flag = RMI_FEATURE_REGISTER_0_PMU_EN |
+ INPLACE(FEATURE_PMU_NUM_CTRS, (unsigned long long)(-1));
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret1 = host_enter_realm_execute(&realm, cmd,
+ (cmd == REALM_PMU_INTERRUPT) ?
+ RMI_EXIT_IRQ : RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1 || (cmd != REALM_PMU_INTERRUPT)) {
+ goto test_exit;
+ }
+
+ ret1 = host_realm_handle_irq_exit(&realm, 0U);
+
+test_exit:
+ ret2 = host_destroy_realm(&realm);
+ if (!ret1 || !ret2) {
+ ERROR("%s() enter=%u destroy=%u\n", __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_check_pmu_state()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
+
+/*
+ * Test if the cycle counter works in Realm with NOPs execution
+ */
+test_result_t host_realm_pmuv3_cycle_works(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_CYCLE);
+}
+
+/*
+ * Test if the event counter works in Realm with NOPs execution
+ */
+test_result_t host_realm_pmuv3_event_works(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_EVENT);
+}
+
+/*
+ * Test if Realm entering/exiting RMM preserves PMU state
+ */
+test_result_t host_realm_pmuv3_rmm_preserves(void)
+{
+ return host_test_realm_pmuv3(REALM_PMU_PRESERVE);
+}
+
+/*
+ * IRQ handler for PMU_PPI #23.
+ * This handler should not be called, as RMM handles IRQs.
+ */
+static int host_overflow_interrupt(void *data)
+{
+ (void)data;
+
+ assert(false);
+ return -1;
+}
+
+/*
+ * Test PMU interrupt functionality in Realm
+ */
+test_result_t host_realm_pmuv3_overflow_interrupt(void)
+{
+ /* Register PMU IRQ handler */
+ int ret = tftf_irq_register_handler(PMU_PPI, host_overflow_interrupt);
+
+ if (ret != 0) {
+ tftf_testcase_printf("Failed to %sregister IRQ handler\n",
+ "");
+ return TEST_RESULT_FAIL;
+ }
+
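+	/*
+	 * Enable the PMU PPI on the NS side. The overflow interrupt is expected
+	 * to cause a REC exit (RMI_EXIT_IRQ) rather than invoke the NS handler
+	 * registered above.
+	 */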
+ tftf_irq_enable(PMU_PPI, GIC_HIGHEST_NS_PRIORITY);
+ return host_test_realm_pmuv3(REALM_PMU_INTERRUPT);
+}
+
+/*
+ * Test aims to create, enter and destroy 2 Realms
+ * Host creates 2 Realms with 1 REC each
+ * Host enters both RECs sequentially
+ * Verifies both Realms returned success
+ * Destroys both Realms
+ */
+test_result_t host_test_multiple_realm_create_enter(void)
+{
+ bool ret1, ret2, ret3;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ struct realm realm1, realm2;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm1, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+
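+	/* The second Realm uses a separate page pool, placed right after the first Realm's pool */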
+ if (!host_create_activate_realm_payload(&realm2, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE + PAGE_POOL_MAX_SIZE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ ret2 = host_destroy_realm(&realm1);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!host_create_shared_mem(&realm1, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ ret1 = false;
+ goto destroy_realms;
+ }
+
+ if (!host_create_shared_mem(&realm2, NS_REALM_SHARED_MEM_BASE +
+ NS_REALM_SHARED_MEM_SIZE, NS_REALM_SHARED_MEM_SIZE)) {
+ ret1 = false;
+ goto destroy_realms;
+ }
+
+ host_shared_data_set_host_val(&realm1, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm1, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ goto destroy_realms;
+ }
+ host_shared_data_set_host_val(&realm2, 0U, HOST_ARG1_INDEX, SLEEP_TIME_MS);
+ ret1 = host_enter_realm_execute(&realm2, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+
+destroy_realms:
+ ret2 = host_destroy_realm(&realm1);
+ ret3 = host_destroy_realm(&realm2);
+
+ if (!ret3 || !ret2) {
+ ERROR("destroy failed\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!ret1) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test set_ripas functionality in Realm
+ * Test allocates 3 PAGES and passes them to the Realm
+ * Realm: verifies that the initial RIPAS of these pages is EMPTY
+ * Realm: requests RIPAS change to RAM
+ * Host: attempts to change RIPAS outside the requested range, verifies the error generated by RMM
+ * Host: changes RIPAS of the first PAGE and re-enters the Realm
+ * Realm: tracks progress and requests RIPAS change to RAM until all pages are complete
+ * Host: changes RIPAS of each requested PAGE and re-enters the Realm
+ * Realm: verifies all PAGES are set to RIPAS=RAM
+ */
+test_result_t host_realm_set_ripas(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, base, new_base, exit_reason;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+ u_register_t test_page_num = 3U;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, 10U);
+ ret1 = host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_HOST_CALL, 0U);
+ base = (u_register_t)page_alloc(PAGE_SIZE * test_page_num);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG2_INDEX,
+ base + (PAGE_SIZE * test_page_num));
+
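+	/*
+	 * Delegate the test pages and map them as protected data
+	 * (DATA_CREATE_UNKNOWN); their RIPAS stays EMPTY until the Realm
+	 * requests a change.
+	 */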
+ for (unsigned int i = 0U; i < test_page_num; i++) {
+ ret = host_realm_delegate_map_protected_data(true, &realm,
+ base + (PAGE_SIZE * i), PAGE_SIZE,
+ base + (PAGE_SIZE * i));
+ if (ret != REALM_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ }
+ ret1 = host_enter_realm_execute(&realm, REALM_SET_RIPAS_CMD,
+ RMI_EXIT_RIPAS_CHANGE, 0U);
+ if (!ret1) {
+ ERROR("Rec enter failed\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0U];
+
+	/* Attempt to set RIPAS for an IPA outside the requested range, expect an error */
+ ret = host_rmi_rtt_set_ripas(realm.rd,
+ realm.rec[0U],
+ run->exit.ripas_base - PAGE_SIZE,
+ run->exit.ripas_base,
+ &new_base);
+ if (ret != RMI_ERROR_INPUT || new_base != 0U) {
+ ERROR("host_rmi_rtt_set_ripas should have failed ret = 0x%lx\n", ret);
+ goto destroy_realm;
+ }
+
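+	/*
+	 * Service the Realm's RIPAS change requests one page at a time:
+	 * set the RIPAS of the requested page to RAM and re-enter the REC
+	 * until all pages have been processed.
+	 */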
+ while (run->exit.ripas_base <= base + (PAGE_SIZE * test_page_num)) {
+ INFO("host_rmi_rtt_set_ripas ripas_base=0x%lx\n",
+ run->exit.ripas_base);
+ ret = host_rmi_rtt_set_ripas(realm.rd,
+ realm.rec[0U],
+ run->exit.ripas_base,
+ run->exit.ripas_base + PAGE_SIZE,
+ &new_base);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_rtt_set_ripas failed ret = 0x%lx\n", ret);
+ goto destroy_realm;
+ }
+ ret = host_realm_rec_enter(&realm,
+ &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_RIPAS_CHANGE) {
+ goto destroy_realm;
+ }
+ }
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ page_free(base);
+ return host_call_result;
+}
+
+/*
+ * Test set_ripas reject functionality in Realm
+ * Test allocates a PAGE and passes it to the Realm
+ * Realm: verifies that the initial RIPAS of the page is EMPTY
+ * Realm: requests RIPAS change to RAM
+ * Host: rejects the RIPAS change and re-enters the Realm
+ * Realm: verifies the REJECT response
+ * Realm: verifies the PAGE still has RIPAS=EMPTY
+ */
+
+test_result_t host_realm_reject_set_ripas(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, exit_reason;
+ unsigned int host_call_result = TEST_RESULT_FAIL;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[1] = {RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, base);
+ if (ret != RMI_SUCCESS) {
+		ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ ret1 = host_enter_realm_execute(&realm, REALM_REJECT_SET_RIPAS_CMD,
+ RMI_EXIT_RIPAS_CHANGE, 0U);
+
+ if (!ret1) {
+ ERROR("Rec did not request RIPAS change\n");
+ goto destroy_realm;
+ }
+ run = (struct rmi_rec_run *)realm.run[0];
+ if (run->exit.ripas_base != base) {
+ ERROR("Rec requested wrong exit.ripas_base\n");
+ goto destroy_realm;
+ }
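+	/* Reject the Realm's RIPAS change request on the next REC entry */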
+ run->entry.flags = REC_ENTRY_FLAG_RIPAS_RESPONSE_REJECT;
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 0U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_HOST_CALL) {
+		ERROR("Re-enter rec failed exit_reason=0x%lx\n", exit_reason);
+ }
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_call_result;
+}
+
+/*
+ * Test aims to generate Realm exit due to abort
+ * when accessing a page with RIPAS=DESTROYED HIPAS=UNASSIGNED
+ * Host maps a protected page (calls data_create) when realm is in new state
+ * Initial state of PAGE is RIPAS=RAM HIPAS=ASSIGNED
+ * Host calls data_destroy, new state HIPAS=UNASSIGNED RIPAS=DESTROYED
+ * Enter Realm, Rec0 executes from page, and Rec1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * The Host verifies exit reason is Instr/Data abort
+ */
+test_result_t host_realm_abort_unassigned_destroyed(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, data, top;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* DATA_CREATE
+ * Copies content of TFTF_BASE in newly created page, any PA can be used for dummy copy
+ * maps 1:1 IPA:PA
+ */
+ ret = host_realm_delegate_map_protected_data(false, &realm, base, PAGE_SIZE, TFTF_BASE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+		ERROR("wrong state after DATA_CREATE\n");
+ goto undelegate_destroy;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto undelegate_destroy;
+ }
+
+	INFO("New state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto undelegate_destroy;
+ }
+
+ /* Realm0 expect rec exit due to Instr Abort unassigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto undelegate_destroy;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Realm1 expect rec exit due to Data Abort unassigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault\n");
+ goto undelegate_destroy;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR= 0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+undelegate_destroy:
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate Realm exit due to Abort
+ * when accessing a page with RIPAS=RAM HIPAS=UNASSIGNED
+ * Host allocates a PAGE, calls init_ripas when realm is in new state
+ * Initial state of PAGE is RIPAS=RAM HIPAS=UNASSIGNED
+ * Enter Realm, REC0 executes from page, and REC1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * Host verifies exit reason is Instr/Data abort.
+ */
+test_result_t host_realm_abort_unassigned_ram(void)
+{
+ bool ret1, ret2;
+ u_register_t ret, top;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ /* This is dummy allocation to get a base address */
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* Set RIPAS of PAGE to RAM */
+ ret = host_rmi_rtt_init_ripas(realm.rd, base, base + PAGE_SIZE, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_rtt_init_ripas", ret, __LINE__);
+ goto destroy_realm;
+ }
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong initial state\n");
+ goto destroy_realm;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ /* Rec0 expect rec exit due to Instr Abort unassigned ram page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Rec1 expect rec exit due to Data Abort unassigned ram page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate Realm exit due to Abort
+ * when accessing a page with RIPAS=DESTROYED HIPAS=ASSIGNED
+ * Host maps a protected page (calls data_create) when realm is in new state
+ * initial state of PAGE is RIPAS=RAM HIPAS=ASSIGNED
+ * Host calls data_destroy, new state HIPAS=UNASSIGNED RIPAS=DESTROYED
+ * Host calls data_create_unknown, new state HIPAS=ASSIGNED RIPAS=DESTROYED
+ * Enter Realm, REC0 executes from page, and REC1 reads the page
+ * Realm should trigger an Instr/Data abort, and will exit to Host.
+ * The Host verifies exit reason is Instr/Data abort
+ */
+test_result_t host_realm_abort_assigned_destroyed(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, top, data;
+ struct realm realm;
+ struct rmi_rec_run *run;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* DATA_CREATE */
+	/* Copies content of TFTF_BASE to the new page; any address can be used; maps IPA:PA 1:1 */
+ ret = host_realm_delegate_map_protected_data(false, &realm, base, PAGE_SIZE, TFTF_BASE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong state after data create\n");
+ goto destroy_realm;
+ }
+ INFO("Initial state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+
+ if (host_realm_activate(&realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ goto destroy_realm;
+ }
+
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto destroy_realm;
+ }
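+	/* Undelegate so the granule can be delegated again for DATA_CREATE_UNKNOWN below */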
+ ret = host_rmi_granule_undelegate(base);
+
+ /* DATA_CREATE_UNKNOWN */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+		ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_DESTROYED)) {
+ ERROR("wrong state after data create unknown\n");
+ goto destroy_data;
+ }
+
+ /* Rec0, expect rec exit due to Instr Abort assigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_SYNC, 0U);
+
+ /* ESR.EC == 0b100000 Instruction Abort from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_IABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_IFSC_MASK) < IFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_IFSC_MASK) > IFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_data;
+ }
+ INFO("IA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Rec1 expect rec exit due to Data Abort assigned destroyed page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ /* ESR.EC == 0b100100 Data Abort exception from a lower Exception level */
+ if (!ret1 || ((run->exit.hpfar >> 4U) != (base >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U))) {
+ ERROR("Rec did not fault ESR=0x%lx\n", run->exit.esr);
+ goto destroy_data;
+ }
+ INFO("DA FAR=0x%lx, HPFAR=0x%lx ESR=0x%lx\n", run->exit.far, run->exit.hpfar,
+ run->exit.esr);
+ res = TEST_RESULT_SUCCESS;
+
+destroy_data:
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate SEA in Realm by accessing
+ * PAGE with HIPAS=assigned/unassigned and RIPAS=EMPTY
+ * Host creates and executes 4 RECs to generate SEA
+ * Realm exception handler runs and returns the ESR back to the Host
+ * Host validates the ESR
+ * Rec0 generates an IA on an unassigned empty page
+ * Rec1 generates a DA on an unassigned empty page
+ * Rec2 generates an IA on an assigned empty page
+ * Rec3 generates a DA on an assigned empty page
+ */
+test_result_t host_realm_sea_empty(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, base, esr;
+ struct realm realm;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 4U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
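+	/*
+	 * The page initially has HIPAS=UNASSIGNED, RIPAS=EMPTY; it is delegated
+	 * later (DATA_CREATE_UNKNOWN) for the assigned-empty cases.
+	 */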
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_EMPTY)) {
+ ERROR("wrong initial state\n");
+ goto destroy_realm;
+ }
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 2U, HOST_ARG1_INDEX, base);
+ host_shared_data_set_host_val(&realm, 3U, HOST_ARG1_INDEX, base);
+
+ /* Rec0 expect IA due to SEA unassigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ ERROR("Rec0 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 0U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec0 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec0 ESR=0x%lx\n", esr);
+
+ /* Rec1 expect DA due to SEA unassigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 1U);
+ if (!ret1) {
+ ERROR("Rec1 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 1U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec1 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec1 ESR=0x%lx\n", esr);
+
+ /* DATA_CREATE_UNKNOWN */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_EMPTY)) {
+		ERROR("wrong state after DATA_CREATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+ INFO("state base = 0x%lx rtt.state=0x%lx rtt.ripas=0x%lx\n",
+ base, rtt.state, rtt.ripas);
+
+ /* Rec2 expect IA due to SEA assigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 2U);
+
+ if (!ret1) {
+ ERROR("Rec2 did not fault\n");
+ goto undelegate_destroy;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 2U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec2 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec2 ESR=0x%lx\n", esr);
+
+	/* Rec3 expect DA due to SEA assigned empty page */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 3U);
+ if (!ret1) {
+ ERROR("Rec3 did not fault\n");
+ goto undelegate_destroy;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 3U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec3 incorrect ESR=0x%lx\n", esr);
+ }
+ INFO("Rec3 ESR=0x%lx\n", esr);
+ res = TEST_RESULT_SUCCESS;
+
+undelegate_destroy:
+ ret = host_rmi_granule_undelegate(base);
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * Test aims to generate SEA in Realm by
+ * executing instructions from an unprotected IPA - Rec0
+ * In Rec1, when HIPAS=UNASSIGNED_NS, we expect to get a Data abort.
+ * The Host then injects an SEA into the Realm.
+ * Realm exception handler runs and returns ESR back to Host
+ * Host validates ESR
+ */
+test_result_t host_realm_sea_unprotected(void)
+{
+
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t ret, base, base_ipa, esr;
+ unsigned int host_call_result;
+ u_register_t exit_reason;
+ struct realm realm;
+ struct rtt_entry rtt;
+ struct rmi_rec_run *run;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE};
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+	/* Can choose any unprotected IPA address, TFTF_BASE chosen for convenience */
+ base = TFTF_BASE;
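+	/* Setting the top bit of the Realm IPA space makes the IPA unprotected */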
+ base_ipa = base | (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm.rmm_feat_reg0) - 1U));
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (rtt.state != RMI_UNASSIGNED) {
+ ERROR("wrong state\n");
+ goto destroy_realm;
+ }
+
+ run = (struct rmi_rec_run *)realm.run[0];
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base_ipa);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base_ipa);
+
+ /* Rec0 expect SEA in realm due to IA unprotected IPA page */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ ERROR("Rec0 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 0U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_IABORT_CUR_EL)) {
+ ERROR("Rec0 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec0 ESR=0x%lx\n", esr);
+
+ run = (struct rmi_rec_run *)realm.run[1U];
+
+ /* Rec1 expect rec exit due to DA unprotected IPA page when HIPAS is UNASSIGNED_NS */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_SYNC, 1U);
+
+ if (!ret1 || (run->exit.hpfar >> 4U) != (base_ipa >> PAGE_SIZE_SHIFT)
+ || (EC_BITS(run->exit.esr) != EC_DABORT_LOWER_EL)
+ || ((run->exit.esr & ISS_DFSC_MASK) < DFSC_L0_TRANS_FAULT)
+ || ((run->exit.esr & ISS_DFSC_MASK) > DFSC_L3_TRANS_FAULT)
+ || ((run->exit.esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U)) {
+ ERROR("Rec1 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("Host DA FAR=0x%lx, HPFAR=0x%lx\n", run->exit.far, run->exit.hpfar);
+ INFO("Injecting SEA to Realm\n");
+
+ /* Inject SEA back to Realm */
+ run->entry.flags = REC_ENTRY_FLAG_INJECT_SEA;
+
+ /* Rec1 re-entry expect exception handler to run, return ESR */
+ ret = host_realm_rec_enter(&realm, &exit_reason, &host_call_result, 1U);
+ if (ret != RMI_SUCCESS || exit_reason != RMI_EXIT_HOST_CALL) {
+		ERROR("rec1 failed ret=0x%lx exit_reason=0x%lx\n", ret, run->exit.exit_reason);
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 1U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA) || (EC_BITS(esr) != EC_DABORT_CUR_EL)) {
+ ERROR("Rec1 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec1 ESR=0x%lx\n", esr);
+ res = host_call_result;
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
+
+/*
+ * @Test_Aim@ Test to check if DIT bit is preserved across NS/RL switch
+ */
+test_result_t host_realm_enable_dit(void)
+{
+ bool ret1, ret2;
+ struct realm realm;
+ u_register_t rec_flag[] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE,
+ RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE}, dit;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, MAX_REC_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Enable FEAT_DIT on Host */
+ write_dit(DIT_BIT);
+ for (unsigned int i = 0; i < MAX_REC_COUNT; i++) {
+ host_shared_data_set_host_val(&realm, i, HOST_ARG1_INDEX, 10U);
+ ret1 = host_enter_realm_execute(&realm, REALM_DIT_CHECK_CMD,
+ RMI_EXIT_HOST_CALL, i);
+ if (!ret1) {
+ break;
+ }
+ }
+
+ ret2 = host_destroy_realm(&realm);
+
+ dit = read_dit();
+ if (dit != DIT_BIT) {
+ ERROR("Host DIT bit not preserved\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ write_dit(0U);
+ if (!ret1 || !ret2) {
+ ERROR("%s(): enter=%d destroy=%d\n",
+ __func__, ret1, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Helper to test whether RTT_DESTROY will change state of
+ * Unassigned ram page to Unassigned Destroyed
+ * Realm is in new state, activate realm before RTT_DESTROY if requested
+ */
+static test_result_t test_rtt_destroy_ram(struct realm *realm, bool activate)
+{
+ u_register_t ret, top, out_rtt, base;
+ struct rtt_entry rtt;
+
+ /* Find an address not mapped in L3 */
+ base = ALIGN_DOWN(PAGE_POOL_BASE, RTT_MAP_SIZE(2U));
+ while (true) {
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.walk_level != 2U || rtt.state != RMI_UNASSIGNED
+ || (rtt.ripas != RMI_EMPTY)) {
+ base += RTT_MAP_SIZE(2U);
+ continue;
+ }
+ break;
+ }
+
+ INFO("base = 0x%lx\n", base);
+
+ /* Create L3 RTT entries */
+ ret = host_rmi_create_rtt_levels(realm, base, rtt.walk_level, 3U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_create_rtt_levels failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* L3 entry should be created */
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (rtt.walk_level != 3U) {
+ ERROR("host_rmi_create_rtt_levels failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = host_rmi_rtt_init_ripas(realm->rd, base, base + PAGE_SIZE, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_rtt_init_ripas", ret, __LINE__);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* INIT_RIPAS should move state to unassigned ram */
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong state after INIT_RIPAS\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (activate) {
+ /* Activate Realm */
+ if (host_realm_activate(realm) != REALM_SUCCESS) {
+ ERROR("%s() failed\n", "host_realm_activate");
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Destroy newly created rtt, for protected IPA there should be no live L3 entry */
+ ret = host_rmi_rtt_destroy(realm->rd, base, 3U, &out_rtt, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_rtt_destroy failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ ret = host_rmi_granule_undelegate(out_rtt);
+
+ /* Walk should terminate at L2 after RTT_DESTROY */
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED || rtt.walk_level != 2U) {
+ ERROR("Wrong state after host_rmi_rtt_destroy\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Helper to test whether RTT_DESTROY will change state of
+ * Unassigned empty page to Unassigned Destroyed
+ * Realm can be in new or active state
+ */
+static test_result_t test_rtt_destroy_empty(struct realm *realm)
+{
+ u_register_t base, ret, top, out_rtt;
+ struct rtt_entry rtt;
+
+ /* Find an address not mapped in L3 */
+ base = ALIGN_DOWN(PAGE_POOL_BASE, RTT_MAP_SIZE(2U));
+ while (true) {
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.walk_level != 2U || rtt.state != RMI_UNASSIGNED
+ || (rtt.ripas != RMI_EMPTY)) {
+ base += RTT_MAP_SIZE(2U);
+ continue;
+ }
+ break;
+ }
+
+ INFO("base = 0x%lx\n", base);
+
+ /* Create L3 RTT entries */
+ ret = host_rmi_create_rtt_levels(realm, base, rtt.walk_level, 3U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_create_rtt_levels failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* L3 entry should be created */
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (rtt.walk_level != 3U) {
+ ERROR("host_rmi_create_rtt_levels failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Destroy newly created rtt, for protected IPA there should be no live L3 entry */
+ ret = host_rmi_rtt_destroy(realm->rd, base, 3U, &out_rtt, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_rtt_destroy failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+ ret = host_rmi_granule_undelegate(out_rtt);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_granule_undelegate RTT failed ret=0x%lx\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Walk should terminate at L2 after RTT_DESTROY */
+ ret = host_rmi_rtt_readentry(realm->rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED || rtt.walk_level != 2U) {
+ ERROR("Wrong state after host_rmi_rtt_destroy\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test aims to exercise PAS transitions
+ * while the Realm is in the NEW state
+ * 1. Test initial state of PAGE is Unassigned Empty
+ * 2. Test DATA_CREATE moves initial state to Assigned Ram
+ * a. Test DATA_DESTROY moves state to Unassigned Destroyed
+ * b. Test DATA_CREATE_UNKNOWN moves state to Assigned Destroyed
+ * c. Test DATA_DESTROY moves state to Unassigned Destroyed
+ * 3. Test DATA_CREATE_UNKNOWN moves initial state (new page) to Assigned Empty
+ * Test DATA_DESTROY moves state to Unassigned Empty
+ * 4. Test INIT_RIPAS moves initial state (new page) to Unassigned RAM
+ * a. Test DATA_CREATE_UNKNOWN moves state to Assigned Ram
+ * 5. Test RTT_DESTROY moves initial state (new page) to Unassigned Destroyed
+ * 6. Test RTT_DESTROY moves Unassigned RAM state (new page) to Unassigned Destroyed
+ * Transition diagram:
+ *
+ * +------------------+ +-------------------+ +-------------------+
+ * | Assigned Empty | | Assigned Dest | | Assigned RAM |
+ * +------------------+ +--+---^------------+ +-------^---+-----^-+
+ * ^ | | ^ ^ | ^
+ * | | | | | | |
+ * | | | | 2a | | |
+ * | | | | +--------------------------+---+ |
+ * | | | | 2b | | |4a
+ * | |3a | +---------+ | |
+ * 3 | | 2c| | | | |
+ * | | | | | | |
+ * | +-----+--------2---------+----------+--+-----------------------+ |
+ * | | | | | | |
+ * | | V V V | |
+ * +----+----+-----v---+ |--V----------V--+---| |------------------+--|
+ * --->| Unassigned Empty |---------->|Unassigned Dest |<--------| Unassigned RAM |
+ * 1 +--------------+----+ 5 +--------------------+ 6 +---------^-----------+
+ * | ^
+ * | |
+ * +---------------------------------------------------------+
+ * 4
+ */
+test_result_t host_realm_pas_validation_new(void)
+{
+ bool ret1;
+ test_result_t test_result = TEST_RESULT_FAIL;
+ u_register_t ret, data, top;
+ struct realm realm;
+ struct rtt_entry rtt;
+ u_register_t rec_flag[2U] = {RMI_RUNNABLE, RMI_RUNNABLE}, base;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 2U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ INFO("Test 1\n");
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+
+ /* Create level 3 RTT */
+ ret = host_rmi_create_rtt_levels(&realm, base, 3U, 3U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_create_rtt_levels failed\n");
+ goto destroy_realm;
+ }
+
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_EMPTY) || rtt.walk_level != 3U) {
+ ERROR("wrong initial state\n");
+ goto destroy_realm;
+ }
+
+	/* 2. DATA_CREATE copies content of TFTF_BASE, chosen for convenience, can be any address */
+ INFO("Test 2\n");
+ ret = host_realm_delegate_map_protected_data(false, &realm, base, PAGE_SIZE, TFTF_BASE);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+		ERROR("wrong state after DATA_CREATE\n");
+ goto undelegate_destroy;
+ }
+
+ /* 2a DATA_DESTROY */
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto undelegate_destroy;
+ }
+
+	/* Undelegate so that the helper function host_realm_delegate_map_protected_data can be reused */
+ host_rmi_granule_undelegate(base);
+
+	/* 2b DATA_CREATE_UNKNOWN */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_DESTROYED)) {
+		ERROR("wrong state after DATA_CREATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+
+ /* 2c DATA_DESTROY */
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_DESTROYED) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto undelegate_destroy;
+ }
+
+ host_rmi_granule_undelegate(base);
+
+ /* 3. start with new page */
+ INFO("Test 3\n");
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_EMPTY)) {
+		ERROR("wrong state after DATA_CREATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_data_destroy(realm.rd, base, &data, &top);
+ if (ret != RMI_SUCCESS || data != base) {
+ ERROR("host_rmi_data_destroy failed\n");
+ goto undelegate_destroy;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ rtt.ripas != RMI_EMPTY) {
+ ERROR("Wrong state after host_rmi_data_destroy\n");
+ goto undelegate_destroy;
+ }
+ host_rmi_granule_undelegate(base);
+
+ /* 4. start with new page */
+ INFO("Test 4\n");
+ base = (u_register_t)page_alloc(PAGE_SIZE);
+ ret = host_rmi_rtt_init_ripas(realm.rd, base, base + PAGE_SIZE, &top);
+ if (ret != RMI_SUCCESS) {
+ ERROR("%s() failed, ret=0x%lx line=%u\n",
+ "host_rmi_rtt_init_ripas", ret, __LINE__);
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_UNASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+ ERROR("wrong state after INIT_RIPAS\n");
+ goto undelegate_destroy;
+ }
+ /* 4a. DATA_CREATE_UNKNOWN */
+ ret = host_realm_delegate_map_protected_data(true, &realm, base, PAGE_SIZE, 0U);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_realm_delegate_map_protected_data failed\n");
+ goto destroy_realm;
+ }
+ ret = host_rmi_rtt_readentry(realm.rd, base, 3U, &rtt);
+ if (ret != RMI_SUCCESS || rtt.state != RMI_ASSIGNED ||
+ (rtt.ripas != RMI_RAM)) {
+		ERROR("wrong state after DATA_CREATE_UNKNOWN\n");
+ goto undelegate_destroy;
+ }
+ host_rmi_granule_undelegate(base);
+
+ /* 5. */
+ INFO("Test 5\n");
+ test_result = test_rtt_destroy_empty(&realm);
+ if (test_result != TEST_RESULT_SUCCESS) {
+ ERROR("Test 5 failed\n");
+ goto destroy_realm;
+ }
+
+ /* 6. */
+ INFO("Test 6\n");
+ test_result = test_rtt_destroy_ram(&realm, false);
+ goto destroy_realm;
+
+undelegate_destroy:
+ ret = host_rmi_granule_undelegate(base);
+ if (ret != RMI_SUCCESS) {
+ ERROR("host_rmi_granule_undelegate failed base=0x%lx ret=0x%lx\n", base, ret);
+ }
+destroy_realm:
+ ret1 = host_destroy_realm(&realm);
+ if (!ret1) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret1);
+ return TEST_RESULT_FAIL;
+ }
+ return test_result;
+}
+
+/*
+ * Test aims to exercise RTT_DESTROY for an active Realm
+ * Initial state of the page is Unassigned Empty
+ * After RTT_DESTROY, verify the state is Unassigned Destroyed
+ */
+test_result_t host_realm_pas_validation_active(void)
+{
+ bool ret;
+ test_result_t test_result = TEST_RESULT_FAIL;
+ u_register_t rec_flag[] = {RMI_RUNNABLE};
+ struct realm realm;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ if (!host_create_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ 0UL, rec_flag, 1U)) {
+ goto destroy_realm;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+ test_result = test_rtt_destroy_ram(&realm, true);
+ if (test_result != TEST_RESULT_SUCCESS) {
+ ERROR("test_rtt_destroy_ram failed\n");
+ goto destroy_realm;
+ }
+
+ test_result = test_rtt_destroy_empty(&realm);
+
+destroy_realm:
+ ret = host_destroy_realm(&realm);
+
+ if (!ret) {
+ ERROR("%s(): destroy=%d\n", __func__, ret);
+ return TEST_RESULT_FAIL;
+ }
+ return test_result;
+}
+
+/*
+ * Test aims to generate SEA in the Realm by accessing
+ * a PAGE with an IPA outside the Realm IPA space, and
+ * to generate a Data abort by accessing
+ * a PAGE with an IPA outside the maximum supported PA
+ * Rec0 and Rec2 try to trigger a Data Abort in the Realm
+ * Rec1 and Rec3 try to trigger an Instruction Abort in the Realm
+ * Realm exception handler runs and returns the ESR
+ * Host validates the ESR
+ */
+test_result_t host_realm_sea_adr_fault(void)
+{
+ bool ret1, ret2;
+ test_result_t res = TEST_RESULT_FAIL;
+ u_register_t base_ipa, esr, feature_flag, base;
+ struct realm realm;
+ u_register_t rec_flag[4U] = {RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE, RMI_RUNNABLE};
+ struct rmi_rec_run *run;
+
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
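+	/*
+	 * Restrict the Realm IPA width (S2SZ) to 0x2C (44 bits); this allows
+	 * constructing an IPA outside the Realm IPA space below.
+	 */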
+ feature_flag = INPLACE(RMI_FEATURE_REGISTER_0_S2SZ, 0x2CU);
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE,
+ feature_flag, rec_flag, 4U)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ goto destroy_realm;
+ }
+
+	/* Any address */
+ base = TFTF_BASE;
+ /* IPA outside Realm space */
+ base_ipa = base | (1UL << (EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ,
+ realm.rmm_feat_reg0) + 1U));
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, base_ipa);
+ host_shared_data_set_host_val(&realm, 1U, HOST_ARG1_INDEX, base_ipa);
+
+ INFO("base_ipa=0x%lx\n", base_ipa);
+
+ run = (struct rmi_rec_run *)realm.run[0];
+
+ /* Rec0 expect SEA in realm due to Data access to address outside Realm IPA size */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 0U);
+ if (!ret1) {
+ ERROR("Rec0 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 0U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_NO_WALK_SEA)
+ || (EC_BITS(esr) != EC_DABORT_CUR_EL)
+ || ((esr & (1UL << ESR_ISS_EABORT_EA_BIT)) == 0U)) {
+ ERROR("Rec0 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec0 ESR=0x%lx\n", esr);
+
+ run = (struct rmi_rec_run *)realm.run[1];
+
+ /* Rec1 expect SEA in realm due to Instruction access to address outside Realm IPA size */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 1U);
+ if (!ret1) {
+ ERROR("Rec1 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 1U, HOST_ARG2_INDEX);
+	if (((esr & ISS_IFSC_MASK) != IFSC_NO_WALK_SEA)
+ || (EC_BITS(esr) != EC_IABORT_CUR_EL)
+ || ((esr & (1UL << ESR_ISS_EABORT_EA_BIT)) == 0U)) {
+ ERROR("Rec1 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("Rec1 ESR=0x%lx\n", esr);
+
+ /* IPA outside max PA supported */
+ base_ipa |= (1UL << 53U);
+
+ INFO("base_ipa=0x%lx\n", base_ipa);
+
+ host_shared_data_set_host_val(&realm, 2U, HOST_ARG1_INDEX, base_ipa);
+ host_shared_data_set_host_val(&realm, 3U, HOST_ARG1_INDEX, base_ipa);
+
+ run = (struct rmi_rec_run *)realm.run[2];
+
+	/* Rec2 expect Data abort in realm due to access to address outside max PA supported */
+ ret1 = host_enter_realm_execute(&realm, REALM_DATA_ACCESS_CMD,
+ RMI_EXIT_HOST_CALL, 2U);
+ if (!ret1) {
+ ERROR("Rec2 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 2U, HOST_ARG2_INDEX);
+ if (((esr & ISS_DFSC_MASK) != DFSC_L0_ADR_SIZE_FAULT)
+ || (EC_BITS(esr) != EC_DABORT_CUR_EL)
+ || ((esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U)) {
+ ERROR("Rec2 incorrect ESR=0x%lx\n", esr);
+ goto destroy_realm;
+ }
+ INFO("Rec2 ESR=0x%lx\n", esr);
+
+ run = (struct rmi_rec_run *)realm.run[3];
+
+	/* Rec3 expect Instruction abort in realm due to access to address outside max PA supported */
+ ret1 = host_enter_realm_execute(&realm, REALM_INSTR_FETCH_CMD,
+ RMI_EXIT_HOST_CALL, 3U);
+ if (!ret1) {
+ ERROR("Rec3 did not fault\n");
+ goto destroy_realm;
+ }
+
+ /* get ESR set by Realm exception handler */
+ esr = host_shared_data_get_realm_val(&realm, 3U, HOST_ARG2_INDEX);
+ if (((esr & ISS_IFSC_MASK) != IFSC_L0_ADR_SIZE_FAULT)
+ || (EC_BITS(esr) != EC_IABORT_CUR_EL)
+ || ((esr & (1UL << ESR_ISS_EABORT_EA_BIT)) != 0U)) {
+ ERROR("Rec3 did not fault exit=0x%lx ret1=%d HPFAR=0x%lx esr=0x%lx\n",
+ run->exit.exit_reason, ret1, run->exit.hpfar, run->exit.esr);
+ goto destroy_realm;
+ }
+ INFO("Rec3 ESR=0x%lx\n", esr);
+ res = TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ ret2 = host_destroy_realm(&realm);
+
+ if (!ret2) {
+ ERROR("%s(): destroy=%d\n",
+ __func__, ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ return res;
+}
diff --git a/tftf/tests/runtime_services/realm_payload/host_realm_spm.c b/tftf/tests/runtime_services/realm_payload/host_realm_spm.c
new file mode 100644
index 000000000..51b87e7b1
--- /dev/null
+++ b/tftf/tests/runtime_services/realm_payload/host_realm_spm.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <fpu.h>
+#include <host_realm_helper.h>
+#include <host_realm_mem_layout.h>
+#include <host_shared_data.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define REALM_TIME_SLEEP 300U
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+static struct mailbox_buffers mb;
+static bool secure_mailbox_initialised;
+
+static fpu_state_t ns_fpu_state_write;
+static fpu_state_t ns_fpu_state_read;
+static struct realm realm;
+
+typedef enum security_state {
+ NONSECURE_WORLD = 0U,
+ REALM_WORLD,
+ SECURE_WORLD,
+ SECURITY_STATE_MAX
+} security_state_t;
+
+/*
+ * This function helps to initialise the secure mailbox, create the Realm payload
+ * and the shared memory to be used between the Host and the Realm.
+ * Skip the test if RME is not supported or the wrong RMM version is being used.
+ */
+static test_result_t init_sp(void)
+{
+ if (!secure_mailbox_initialised) {
+ GET_TFTF_MAILBOX(mb);
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ secure_mailbox_initialised = true;
+ }
+ return TEST_RESULT_SUCCESS;
+}
+
+static test_result_t init_realm(void)
+{
+ u_register_t rec_flag[1] = {RMI_RUNNABLE};
+
+ /*
+ * Initialise Realm payload
+ */
+ if (!host_create_activate_realm_payload(&realm, (u_register_t)REALM_IMAGE_BASE,
+ (u_register_t)PAGE_POOL_BASE,
+ (u_register_t)PAGE_POOL_MAX_SIZE, 0UL, rec_flag, 1U)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Create shared memory between Host and Realm
+ */
+ if (!host_create_shared_mem(&realm, NS_REALM_SHARED_MEM_BASE,
+ NS_REALM_SHARED_MEM_SIZE)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+static bool host_realm_handle_fiq_exit(struct realm *realm_ptr,
+ unsigned int rec_num)
+{
+ struct rmi_rec_run *run = (struct rmi_rec_run *)realm_ptr->run[rec_num];
+ if (run->exit.exit_reason == RMI_EXIT_FIQ) {
+ return true;
+ }
+ return false;
+}
+
+/* Send request to SP to fill FPU/SIMD regs with secure template values */
+static bool fpu_fill_sec(void)
+{
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/* Send request to SP to compare FPU/SIMD regs with secure template values */
+static bool fpu_cmp_sec(void)
+{
+ struct ffa_value ret = cactus_req_simd_compare_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+
+/* Send request to Realm to fill FPU/SIMD regs with realm template values */
+static bool fpu_fill_rl(void)
+{
+ if (!host_enter_realm_execute(&realm, REALM_REQ_FPU_FILL_CMD, RMI_EXIT_HOST_CALL, 0U)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/* Send request to Realm to compare FPU/SIMD regs with previous realm template values */
+static bool fpu_cmp_rl(void)
+{
+ if (!host_enter_realm_execute(&realm, REALM_REQ_FPU_CMP_CMD, RMI_EXIT_HOST_CALL, 0U)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while Secure Partition is in waiting
+ * state and Realm world runs a busy loop at R-EL1.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Once the SP returns with a direct response message, it moves to WAITING
+ * state.
+ *
+ * 3. Create and execute a busy loop to sleep the PE in the realm world for
+ * REALM_TIME_SLEEP ms.
+ *
+ * 4. Trusted watchdog timer expires during this time which leads to secure
+ * interrupt being triggered while cpu is executing in realm world.
+ *
+ * 5. Realm EL1 exits to host, but because the FIQ is still pending,
+ * the Host will be pre-empted to EL3.
+ *
+ * 6. The interrupt is trapped to BL31/SPMD as FIQ and later synchronously
+ * delivered to SPM.
+ *
+ * 7. SPM injects a virtual IRQ to first Cactus Secure Partition.
+ *
+ * 8. Once the SP has handled the interrupt, it returns execution back to normal
+ * world using FFA_MSG_WAIT call.
+ *
+ * 9. TFTF parses REC's exit reason (FIQ in this case).
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of the Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ * 13. TFTF then proceeds to destroy the Realm.
+ *
+ */
+test_result_t host_realm_sec_interrupt_can_preempt_rl(void)
+{
+ struct ffa_value ret_values;
+ test_result_t res;
+
+ /* Verify RME is present and RMM is not TRP */
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Verify that FFA is there and that it has the correct version. */
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ res = init_sp();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ res = init_realm();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ goto destroy_realm;
+ }
+
+ /*
+ * Send a message to SP1 through direct messaging.
+ */
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER,
+ (REALM_TIME_SLEEP/2));
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ goto destroy_realm;
+ }
+
+ /*
+ * Spin Realm payload for REALM_TIME_SLEEP ms. This ensures the secure wdog
+ * timer triggers during this time.
+ */
+ host_shared_data_set_host_val(&realm, 0U, HOST_ARG1_INDEX, REALM_TIME_SLEEP);
+ host_enter_realm_execute(&realm, REALM_SLEEP_CMD, RMI_EXIT_FIQ, 0U);
+
+ /*
+ * Check if Realm exit reason is FIQ.
+ */
+ if (!host_realm_handle_fiq_exit(&realm, 0U)) {
+ ERROR("Trusted watchdog timer interrupt not fired\n");
+ goto destroy_realm;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ goto destroy_realm;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ goto destroy_realm;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ goto destroy_realm;
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ ERROR("host_destroy_realm error\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+
+destroy_realm:
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+}
+
+/* Choose a random security state that is different from the 'current' state */
+static security_state_t get_random_security_state(security_state_t current,
+ bool is_sp_present)
+{
+ security_state_t next;
+
+ /*
+ * 3 world config: Switch between NS world and Realm world as Secure
+ * world is not enabled or SP is not loaded.
+ */
+ if (!is_sp_present) {
+ if (current == NONSECURE_WORLD) {
+ return REALM_WORLD;
+ } else {
+ return NONSECURE_WORLD;
+ }
+ }
+
+ /*
+ * 4 world config: Randomly select a security_state between Realm, NS
+ * and Secure until the new state is not equal to the current state.
+ */
+ while (true) {
+ next = rand() % SECURITY_STATE_MAX;
+ if (next == current) {
+ continue;
+ }
+
+ break;
+ }
+
+ return next;
+}
+
+/*
+ * Test whether the FPU/SIMD state (32 SIMD vectors, FPCR and FPSR registers)
+ * is preserved during random context switches between the Secure, Non-Secure
+ * and Realm worlds.
+ *
+ * The following steps are performed by this test:
+ *
+ * Init:
+ * Fill FPU registers with random values in
+ * 1. NS World (NS-EL2)
+ * 2. Realm world (R-EL1)
+ * 3. Secure world (S-EL1) (if SP loaded)
+ *
+ * Test loop:
+ * security_state_next = get_random_security_state(current, is_sp_present)
+ *
+ * switch to security_state_next
+ * if (FPU registers read != last filled values)
+ * break loop; return TC_FAIL
+ *
+ * Fill FPU registers with new random values for the next comparison.
+ */
+test_result_t host_realm_fpu_access_in_rl_ns_se(void)
+{
+ security_state_t sec_state;
+ bool is_sp_present;
+ test_result_t res;
+
+ /* Verify RME is present and RMM is not TRP */
+ SKIP_TEST_IF_RME_NOT_SUPPORTED_OR_RMM_IS_TRP();
+
+ /* Verify that FFA is there and that it has the correct version. */
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ res = init_realm();
+ if (res != TEST_RESULT_SUCCESS) {
+ return res;
+ }
+
+ /* Fill FPU registers in Non-secure world */
+ fpu_state_write_rand(&ns_fpu_state_write);
+
+ /* Fill FPU registers in Realm world */
+ if (!fpu_fill_rl()) {
+ ERROR("fpu_fill_rl error\n");
+ goto destroy_realm;
+ }
+ sec_state = REALM_WORLD;
+
+ /* Fill FPU registers in Secure world if present */
+ res = init_sp();
+ if (res == TEST_RESULT_SUCCESS) {
+ if (!fpu_fill_sec()) {
+ ERROR("fpu_fill_sec error\n");
+ goto destroy_realm;
+ }
+
+ sec_state = SECURE_WORLD;
+ is_sp_present = true;
+ } else {
+ is_sp_present = false;
+ }
+
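+ /*
+ * Perform 128 random world switches; at each step verify the last
+ * written FPU/SIMD state and fill it with fresh random values.
+ */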
+ for (uint32_t i = 0; i < 128; i++) {
+ sec_state = get_random_security_state(sec_state, is_sp_present);
+
+ switch (sec_state) {
+ case NONSECURE_WORLD:
+ /* NS world verify its FPU/SIMD state registers */
+ fpu_state_read(&ns_fpu_state_read);
+ if (fpu_state_compare(&ns_fpu_state_write,
+ &ns_fpu_state_read)) {
+ ERROR("%s failed %d\n", __func__, __LINE__);
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in NS world */
+ fpu_state_write_rand(&ns_fpu_state_write);
+ break;
+ case REALM_WORLD:
+ /* Realm world verify its FPU/SIMD state registers */
+ if (!fpu_cmp_rl()) {
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in Realm */
+ if (!fpu_fill_rl()) {
+ goto destroy_realm;
+ }
+
+ break;
+ case SECURE_WORLD:
+ /* Secure world verify its FPU/SIMD state registers */
+ if (!fpu_cmp_sec()) {
+ goto destroy_realm;
+ }
+
+ /* Fill FPU state with new random values in SP */
+ if (!fpu_fill_sec()) {
+ goto destroy_realm;
+ }
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!host_destroy_realm(&realm)) {
+ ERROR("host_destroy_realm error\n");
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+destroy_realm:
+ host_destroy_realm(&realm);
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S b/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S
new file mode 100644
index 000000000..0d5395fa1
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/aarch32/ffa_arch_helpers.S
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .macro service_call _conduit
+ /* Push r4-r9; r9 is included to keep the stack pointer 8-byte aligned. */
+ push {r4 - r9}
+
+ /*
+ * Save the ffa_value pointer in a callee saved register.
+ */
+ mov r8, r0
+
+ /* Load the argument values into the appropriate registers. */
+ ldm r0, {r0 - r7}
+
+ \_conduit #0
+
+ /*
+ * The return values are stored in r0-r7, put them in the ffa_value
+ * return structure.
+ */
+ stm r8, {r0 - r7}
+
+ pop {r4 - r9}
+ .endm
+
+.globl ffa_svc
+func ffa_svc
+ service_call svc
+ bx lr
+endfunc ffa_svc
+
+.globl ffa_smc
+func ffa_smc
+ service_call smc
+ bx lr
+endfunc ffa_smc
diff --git a/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S b/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S
new file mode 100644
index 000000000..b9c9cd9fd
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/aarch64/ffa_arch_helpers.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .macro service_call _conduit
+ /*
+ * Use a callee saved register to point to ffa_value structure after
+ * returning from the conduit.
+ * Although x19 contains an 8-byte value, we are allocating 16 bytes
+ * on the stack to respect the 16-byte stack-alignment.
+ */
+ str x19, [sp, #-16]!
+
+ /*
+ * Save the pointer to the ffa_value structure in x19, which is a
+ * callee-saved register.
+ */
+ mov x19, x0
+ /* Load the argument values into the appropriate registers. */
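+ /*
+ * The ffa_value structure is laid out as consecutive 64-bit fields, so
+ * x0-x17 can be loaded pairwise at 16-byte strides.
+ */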
+ ldp x16, x17, [x0, #128]
+ ldp x14, x15, [x0, #112]
+ ldp x12, x13, [x0, #96]
+ ldp x10, x11, [x0, #80]
+ ldp x8, x9, [x0, #64]
+ ldp x6, x7, [x0, #48]
+ ldp x4, x5, [x0, #32]
+ ldp x2, x3, [x0, #16]
+ ldp x0, x1, [x0, #0]
+
+ \_conduit #0
+
+ /*
+ * The return values are stored in x0-x17, put them in the ffa_value
+ * return structure. x19 points to the ffa_value structure.
+ */
+ stp x0, x1, [x19, #0]
+ stp x2, x3, [x19, #16]
+ stp x4, x5, [x19, #32]
+ stp x6, x7, [x19, #48]
+ stp x8, x9, [x19, #64]
+ stp x10, x11, [x19, #80]
+ stp x12, x13, [x19, #96]
+ stp x14, x15, [x19, #112]
+ stp x16, x17, [x19, #128]
+ ldr x19, [sp], #16
+ .endm
+
+.globl ffa_svc
+func ffa_svc
+ service_call svc
+ ret
+endfunc ffa_svc
+
+.globl ffa_smc
+func ffa_smc
+ service_call smc
+ ret
+endfunc ffa_smc
diff --git a/tftf/tests/runtime_services/secure_service/ffa_helpers.c b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
index 8e7b58c6f..8b53bb001 100644
--- a/tftf/tests/runtime_services/secure_service/ffa_helpers.c
+++ b/tftf/tests/runtime_services/secure_service/ffa_helpers.c
@@ -1,13 +1,24 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <smccc.h>
+#include <assert.h>
#include <ffa_endpoints.h>
#include <ffa_helpers.h>
#include <ffa_svc.h>
+#include <smccc.h>
+
+struct ffa_value ffa_service_call(struct ffa_value *args)
+{
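+ /*
+ * The Ivy test image issues FF-A calls through the SVC conduit;
+ * other images use SMC.
+ */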
+#if IMAGE_IVY
+ ffa_svc(args);
+#else
+ ffa_smc(args);
+#endif
+ return *args;
+}
/*-----------------------------------------------------------------------------
* FFA_RUN
@@ -26,15 +37,15 @@
* -BUSY: vCPU is busy and caller must retry later
* -ABORTED: vCPU or VM ran into an unexpected error and has aborted
*/
-smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id)
+struct ffa_value ffa_run(uint32_t dest_id, uint32_t vcpu_id)
{
- smc_args args = {
- FFA_MSG_RUN,
+ struct ffa_value args = {
+ FFA_RUN,
(dest_id << 16) | vcpu_id,
0, 0, 0, 0, 0, 0
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/*-----------------------------------------------------------------------------
@@ -55,12 +66,12 @@ smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id)
* -BUSY: Message target is busy
* -ABORTED: Message target ran into an unexpected error and has aborted
*/
-smc_ret_values ffa_msg_send_direct_req64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4)
+struct ffa_value ffa_msg_send_direct_req64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_REQ_SMC64,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -71,15 +82,15 @@ smc_ret_values ffa_msg_send_direct_req64(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_req32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4)
+struct ffa_value ffa_msg_send_direct_req32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_REQ_SMC32,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -90,15 +101,15 @@ smc_ret_values ffa_msg_send_direct_req32(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_resp64(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint64_t arg0,
- uint64_t arg1, uint64_t arg2,
- uint64_t arg3, uint64_t arg4)
+struct ffa_value ffa_msg_send_direct_resp64(ffa_id_t source_id,
+ ffa_id_t dest_id, uint64_t arg0,
+ uint64_t arg1, uint64_t arg2,
+ uint64_t arg3, uint64_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_RESP_SMC64,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -109,15 +120,15 @@ smc_ret_values ffa_msg_send_direct_resp64(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_send_direct_resp32(ffa_vm_id_t source_id,
- ffa_vm_id_t dest_id, uint32_t arg0,
- uint32_t arg1, uint32_t arg2,
- uint32_t arg3, uint32_t arg4)
+struct ffa_value ffa_msg_send_direct_resp32(ffa_id_t source_id,
+ ffa_id_t dest_id, uint32_t arg0,
+ uint32_t arg1, uint32_t arg2,
+ uint32_t arg3, uint32_t arg4)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_SEND_DIRECT_RESP_SMC32,
.arg1 = ((uint32_t)(source_id << 16)) | (dest_id),
.arg2 = 0,
@@ -128,85 +139,64 @@ smc_ret_values ffa_msg_send_direct_resp32(ffa_vm_id_t source_id,
.arg7 = arg4,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-
-/**
- * Initialises the header of the given `ffa_memory_region`, not including the
- * composite memory region offset.
- */
-static void ffa_memory_region_init_header(
- struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
- ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
- ffa_memory_handle_t handle, uint32_t tag, ffa_vm_id_t receiver,
- ffa_memory_access_permissions_t permissions)
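+/**
+ * Initialises the header of the given `ffa_memory_region`, not including the
+ * composite memory region offset.
+ */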
+void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
+ ffa_id_t sender,
+ ffa_memory_attributes_t attributes,
+ ffa_memory_region_flags_t flags,
+ ffa_memory_handle_t handle, uint32_t tag,
+ uint32_t receiver_count)
{
memory_region->sender = sender;
memory_region->attributes = attributes;
- memory_region->reserved_0 = 0;
memory_region->flags = flags;
memory_region->handle = handle;
memory_region->tag = tag;
- memory_region->reserved_1 = 0;
- memory_region->receiver_count = 1;
- memory_region->receivers[0].receiver_permissions.receiver = receiver;
- memory_region->receivers[0].receiver_permissions.permissions =
- permissions;
- memory_region->receivers[0].receiver_permissions.flags = 0;
- memory_region->receivers[0].reserved_0 = 0;
+ memory_region->memory_access_desc_size =
+ sizeof(struct ffa_memory_access);
+ memory_region->receiver_count = receiver_count;
+ memory_region->receivers_offset =
+ offsetof(struct ffa_memory_region, receivers);
+ memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
}
/**
- * Initialises the given `ffa_memory_region` and copies as many as possible of
- * the given constituents to it.
+ * Copies as many as possible of the given constituents to the respective
+ * memory region and sets the respective offset.
*
* Returns the number of constituents remaining which wouldn't fit, and (via
* return parameters) the size in bytes of the first fragment of data copied to
* `memory_region` (attributes, constituents and memory region header size), and
* the total size of the memory sharing message including all constituents.
*/
-uint32_t ffa_memory_region_init(
+static uint32_t ffa_memory_region_init_constituents(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
const struct ffa_memory_region_constituent constituents[],
- uint32_t constituent_count, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
- enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
- enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t constituent_count, uint32_t *total_length,
uint32_t *fragment_length)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
struct ffa_composite_memory_region *composite_memory_region;
uint32_t fragment_max_constituents;
- uint32_t count_to_copy;
- uint32_t i;
uint32_t constituents_offset;
+ uint32_t count_to_copy;
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
-
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
-
- ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- 0, tag, receiver, permissions);
/*
* Note that `sizeof(struct_ffa_memory_region)` and `sizeof(struct
* ffa_memory_access)` must both be multiples of 16 (as verified by the
* asserts in `ffa_memory.c`, so it is guaranteed that the offset we
* calculate here is aligned to a 64-bit boundary and so 64-bit values
* can be copied without alignment faults.
+ * If there are multiple receiver endpoints, their respective access
+ * structures should point to the same offset value.
*/
- memory_region->receivers[0].composite_memory_region_offset =
- sizeof(struct ffa_memory_region) +
- memory_region->receiver_count *
- sizeof(struct ffa_memory_access);
+ for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ memory_region->receiver_count *
+ sizeof(struct ffa_memory_access);
+ }
composite_memory_region =
ffa_memory_region_get_composite(memory_region, 0);
@@ -226,7 +216,7 @@ uint32_t ffa_memory_region_init(
count_to_copy = fragment_max_constituents;
}
- for (i = 0; i < constituent_count; ++i) {
+ for (uint32_t i = 0; i < constituent_count; ++i) {
if (i < count_to_copy) {
composite_memory_region->constituents[i] =
constituents[i];
@@ -254,43 +244,116 @@ uint32_t ffa_memory_region_init(
/**
* Initialises the given `ffa_memory_region` to be used for an
* `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
+ * Initialises the given `ffa_memory_region` and copies as many as possible of
+ * the given constituents to it.
+ *
+ * Returns the number of constituents remaining which wouldn't fit, and (via
+ * return parameters) the size in bytes of the first fragment of data copied to
+ * `memory_region` (attributes, constituents and memory region header size), and
+ * the total size of the memory sharing message including all constituents.
+ */
+uint32_t ffa_memory_region_init(
+ struct ffa_memory_region *memory_region, size_t memory_region_max_size,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t tag,
+ ffa_memory_region_flags_t flags, enum ffa_memory_type type,
+ enum ffa_memory_cacheability cacheability,
+ enum ffa_memory_shareability shareability, uint32_t *total_length,
+ uint32_t *fragment_length)
+{
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
+
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ 0, tag, receiver_count);
+
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
+
+ return ffa_memory_region_init_constituents(
+ memory_region, memory_region_max_size, constituents,
+ constituent_count, total_length, fragment_length);
+}
+
+uint32_t ffa_memory_fragment_init(
+ struct ffa_memory_region_constituent *fragment,
+ size_t fragment_max_size,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t *fragment_length)
+{
+ const uint32_t fragment_max_constituents =
+ fragment_max_size /
+ sizeof(struct ffa_memory_region_constituent);
+
+ uint32_t count_to_copy =
+ MIN(constituent_count, fragment_max_constituents);
+
+ for (uint32_t i = 0; i < count_to_copy; ++i) {
+ fragment[i] = constituents[i];
+ }
+
+ if (fragment_length != NULL) {
+ *fragment_length = count_to_copy *
+ sizeof(struct ffa_memory_region_constituent);
+ }
+
+ return constituent_count - count_to_copy;
+}
+
+/**
+ * Initialises the given `ffa_memory_region` to be used for an
+ * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
*
* Returns the size of the message written.
*/
uint32_t ffa_memory_retrieve_request_init(
struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
- ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
- enum ffa_instruction_access instruction_access,
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
enum ffa_memory_shareability shareability)
{
- ffa_memory_access_permissions_t permissions = 0;
- ffa_memory_attributes_t attributes = 0;
+ ffa_memory_attributes_t attributes = {
+ .type = type,
+ .cacheability = cacheability,
+ .shareability = shareability,
+ };
- /* Set memory region's permissions. */
- ffa_set_data_access_attr(&permissions, data_access);
- ffa_set_instruction_access_attr(&permissions, instruction_access);
+ ffa_memory_region_init_header(memory_region, sender, attributes, flags,
+ handle, tag, receiver_count);
- /* Set memory region's page attributes. */
- ffa_set_memory_type_attr(&attributes, type);
- ffa_set_memory_cacheability_attr(&attributes, cacheability);
- ffa_set_memory_shareability_attr(&attributes, shareability);
+ memcpy(memory_region->receivers, receivers,
+ receiver_count * sizeof(struct ffa_memory_access));
- ffa_memory_region_init_header(memory_region, sender, attributes, flags,
- handle, tag, receiver, permissions);
/*
* Offset 0 in this case means that the hypervisor should allocate the
* address ranges. This is the only configuration supported by Hafnium,
* as it enforces 1:1 mappings in the stage 2 page tables.
*/
- memory_region->receivers[0].composite_memory_region_offset = 0;
- memory_region->receivers[0].reserved_0 = 0;
+ for (uint32_t i = 0; i < receiver_count; i++) {
+ memory_region->receivers[i].composite_memory_region_offset = 0;
+ memory_region->receivers[i].reserved_0 = 0;
+ }
return sizeof(struct ffa_memory_region) +
memory_region->receiver_count * sizeof(struct ffa_memory_access);
}
+/**
+ * Configure `region` for a hypervisor retrieve request - i.e. all fields except
+ * `handle` are initialized to 0.
+ */
+void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
+ ffa_memory_handle_t handle)
+{
+ memset(region, 0, sizeof(struct ffa_memory_region));
+ region->handle = handle;
+}
/*
* FFA Version ABI helper.
@@ -298,98 +361,159 @@ uint32_t ffa_memory_retrieve_request_init(
* -Bits[30:16]: Major version.
* -Bits[15:0]: Minor version.
*/
-smc_ret_values ffa_version(uint32_t input_version)
+struct ffa_value ffa_version(uint32_t input_version)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_VERSION,
.arg1 = input_version
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_id_get(void)
+struct ffa_value ffa_id_get(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_ID_GET
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_spm_id_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_SPM_ID_GET
+ };
+
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_msg_wait(void)
+struct ffa_value ffa_msg_wait(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MSG_WAIT
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
-smc_ret_values ffa_error(int32_t error_code)
+struct ffa_value ffa_error(int32_t error_code)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_ERROR,
.arg1 = 0,
.arg2 = error_code
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Query the higher EL if the requested FF-A feature is implemented. */
-smc_ret_values ffa_features(uint32_t feature)
+struct ffa_value ffa_features(uint32_t feature)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_FEATURES,
.arg1 = feature
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+/* Query the higher EL if the requested FF-A feature is implemented. */
+struct ffa_value ffa_features_with_input_property(uint32_t feature, uint32_t param)
+{
+ struct ffa_value args = {
+ .fid = FFA_FEATURES,
+ .arg1 = feature,
+ .arg2 = param,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/* Get information about VMs or SPs based on UUID, using registers. */
+struct ffa_value ffa_partition_info_get_regs(const struct ffa_uuid uuid,
+ const uint16_t start_index,
+ const uint16_t tag)
+{
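+ /*
+ * Pack the 128-bit UUID into arg1/arg2, and the start index and tag
+ * into arg3.
+ */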
+ uint64_t arg1 = (uint64_t)uuid.uuid[1] << 32 | uuid.uuid[0];
+ uint64_t arg2 = (uint64_t)uuid.uuid[3] << 32 | uuid.uuid[2];
+ uint64_t arg3 = start_index | (uint64_t)tag << 16;
+
+ struct ffa_value args = {
+ .fid = FFA_PARTITION_INFO_GET_REGS_SMC64,
+ .arg1 = arg1,
+ .arg2 = arg2,
+ .arg3 = arg3,
+ };
+
+ return ffa_service_call(&args);
}
/* Get information about VMs or SPs based on UUID */
-smc_ret_values ffa_partition_info_get(const uint32_t uuid[4])
+struct ffa_value ffa_partition_info_get(const struct ffa_uuid uuid)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_PARTITION_INFO_GET,
- .arg1 = uuid[0],
- .arg2 = uuid[1],
- .arg3 = uuid[2],
- .arg4 = uuid[3]
+ .arg1 = uuid.uuid[0],
+ .arg2 = uuid.uuid[1],
+ .arg3 = uuid.uuid[2],
+ .arg4 = uuid.uuid[3]
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Query SPMD that the rx buffer of the partition can be released */
-smc_ret_values ffa_rx_release(void)
+struct ffa_value ffa_rx_release(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_RX_RELEASE
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Map the RXTX buffer */
-smc_ret_values ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
+struct ffa_value ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_RXTX_MAP_SMC64,
.arg1 = send,
.arg2 = recv,
- .arg3 = pages
+ .arg3 = pages,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+/* Unmap the RXTX buffer allocated by the given FF-A component */
+struct ffa_value ffa_rxtx_unmap(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_RXTX_UNMAP,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
}
/* Donate memory to another partition */
-smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
+struct ffa_value ffa_mem_donate(uint32_t descriptor_length,
uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_DONATE_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -397,14 +521,14 @@ smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Lend memory to another partition */
-smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_lend(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_LEND_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -412,14 +536,14 @@ smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Share memory with another partition */
-smc_ret_values ffa_mem_share(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_share(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_SHARE_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -427,14 +551,14 @@ smc_ret_values ffa_mem_share(uint32_t descriptor_length,
.arg4 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Retrieve memory shared by another partition */
-smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
- uint32_t fragment_length)
+struct ffa_value ffa_mem_retrieve_req(uint32_t descriptor_length,
+ uint32_t fragment_length)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RETRIEVE_REQ_SMC32,
.arg1 = descriptor_length,
.arg2 = fragment_length,
@@ -445,28 +569,240 @@ smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
.arg7 = FFA_PARAM_MBZ
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Relinquish access to memory region */
-smc_ret_values ffa_mem_relinquish(void)
+struct ffa_value ffa_mem_relinquish(void)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RELINQUISH,
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
}
/* Reclaim exclusive access to owned memory region */
-smc_ret_values ffa_mem_reclaim(uint64_t handle, uint32_t flags)
+struct ffa_value ffa_mem_reclaim(uint64_t handle, uint32_t flags)
{
- smc_args args = {
+ struct ffa_value args = {
.fid = FFA_MEM_RECLAIM,
.arg1 = (uint32_t) handle,
.arg2 = (uint32_t) (handle >> 32),
.arg3 = flags
};
- return tftf_smc(&args);
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_mem_frag_rx(ffa_memory_handle_t handle,
+ uint32_t fragment_offset)
+{
+ /* Note that sender MBZ at virtual instance. */
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_RX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_offset,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_mem_frag_tx(ffa_memory_handle_t handle,
+ uint32_t fragment_length)
+{
+ struct ffa_value args = {
+ .fid = FFA_MEM_FRAG_TX,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_length,
+ };
+
+ /* Note that sender MBZ at virtual instance. */
+ return ffa_service_call(&args);
+}
+
+/** Create Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_CREATE,
+ .arg1 = vm_id,
+ .arg2 = vcpu_count,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Destroy Notifications Bitmap for the given VM */
+struct ffa_value ffa_notification_bitmap_destroy(ffa_id_t vm_id)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BITMAP_DESTROY,
+ .arg1 = vm_id,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Bind VM to all the notifications in the bitmap */
+struct ffa_value ffa_notification_bind(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_BIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+/** Unbind previously bound VM from notifications in bitmap */
+struct ffa_value ffa_notification_unbind(ffa_id_t sender,
+ ffa_id_t receiver,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_UNBIND,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = (uint32_t)(bitmap),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ,
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_set(ffa_id_t sender, ffa_id_t receiver,
+ uint32_t flags,
+ ffa_notification_bitmap_t bitmap)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_SET,
+ .arg1 = (sender << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = (uint32_t)(bitmap & 0xFFFFFFFFU),
+ .arg4 = (uint32_t)(bitmap >> 32),
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_get(ffa_id_t receiver, uint32_t vcpu_id,
+ uint32_t flags)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_GET,
+ .arg1 = (vcpu_id << 16) | (receiver),
+ .arg2 = flags,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
+struct ffa_value ffa_notification_info_get(void)
+{
+ struct ffa_value args = {
+ .fid = FFA_NOTIFICATION_INFO_GET_SMC64,
+ .arg1 = FFA_PARAM_MBZ,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ return ffa_service_call(&args);
+}
+
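+/*
+ * Copy up to 8 bytes of the message into a single 64-bit argument register.
+ * Returns the number of bytes written.
+ */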
+static size_t char_to_arg_helper(const char *message, size_t size,
+ u_register_t *arg)
+{
+ size_t to_write = size > sizeof(uint64_t) ? sizeof(uint64_t) : size;
+
+ for (int i = 0; i < to_write; i++) {
+ ((char *)arg)[i] = message[i];
+ }
+ return to_write;
+}
+
+struct ffa_value ffa_console_log(const char *message, size_t char_count)
+{
+ struct ffa_value args = {
+ .fid = FFA_CONSOLE_LOG_SMC64,
+ .arg1 = char_count,
+ };
+ size_t written = 0;
+
+ assert(char_count <= sizeof(uint64_t) * 6);
+
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg2);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg3);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg4);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg5);
+ written += char_to_arg_helper(&message[written], char_count - written,
+ &args.arg6);
+ char_to_arg_helper(&message[written], char_count - written,
+ &args.arg7);
+
+ return ffa_service_call(&args);
+}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor.
+ */
+struct ffa_memory_access ffa_memory_access_init(
+ ffa_id_t receiver_id, enum ffa_data_access data_access,
+ enum ffa_instruction_access instruction_access,
+ ffa_memory_receiver_flags_t flags,
+ struct ffa_memory_access_impdef *impdef)
+{
+ struct ffa_memory_access access;
+ access.reserved_0 = 0;
+ access.composite_memory_region_offset = 0;
+ access.receiver_permissions.flags = flags;
+ access.receiver_permissions.receiver = receiver_id;
+ access.receiver_permissions.permissions.data_access = data_access;
+ access.receiver_permissions.permissions.instruction_access =
+ instruction_access;
+ access.impdef = impdef != NULL ? *impdef :
+ (struct ffa_memory_access_impdef){{0, 0}};
+
+ return access;
}
diff --git a/tftf/tests/runtime_services/secure_service/spm_common.c b/tftf/tests/runtime_services/secure_service/spm_common.c
index 179ef1cb9..ee25c82f6 100644
--- a/tftf/tests/runtime_services/secure_service/spm_common.c
+++ b/tftf/tests/runtime_services/secure_service/spm_common.c
@@ -1,22 +1,24 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "stdint.h"
+
+#include "ffa_helpers.h"
+#include <cactus_test_cmds.h>
#include <debug.h>
#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <lib/extensions/sve.h>
#include <spm_common.h>
#include <xlat_tables_v2.h>
-#define __STR(x) #x
-#define STR(x) __STR(x)
-#define SIMD_TWO_VECTORS_BYTES_STR (2 * SIMD_VECTOR_LEN_BYTES)
-
/**
* Helper to log errors after FF-A calls.
*/
-bool is_ffa_call_error(smc_ret_values ret)
+bool is_ffa_call_error(struct ffa_value ret)
{
if (ffa_func_id(ret) == FFA_ERROR) {
VERBOSE("FF-A call returned error (%x): %d\n",
@@ -26,12 +28,25 @@ bool is_ffa_call_error(smc_ret_values ret)
return false;
}
+bool is_expected_ffa_error(struct ffa_value ret, int32_t error_code)
+{
+ if (ffa_func_id(ret) == FFA_ERROR &&
+ ffa_error_code(ret) == error_code) {
+ return true;
+ }
+
+ ERROR("Expected FFA_ERROR(%x), code: %d, got %x %d\n",
+ FFA_ERROR, error_code, ffa_func_id(ret), ffa_error_code(ret));
+
+ return false;
+}
+
/**
* Helper to verify return of FF-A call is an FFA_MSG_SEND_DIRECT_RESP.
* Should be used after FFA_MSG_SEND_DIRECT_REQ, or after sending a test command
* to an SP.
*/
-bool is_ffa_direct_response(smc_ret_values ret)
+bool is_ffa_direct_response(struct ffa_value ret)
{
if ((ffa_func_id(ret) == FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
(ffa_func_id(ret) == FFA_MSG_SEND_DIRECT_RESP_SMC64)) {
@@ -48,7 +63,7 @@ bool is_ffa_direct_response(smc_ret_values ret)
/**
* Helper to check the return value of FF-A call is as expected.
*/
-bool is_expected_ffa_return(smc_ret_values ret, uint32_t func_id)
+bool is_expected_ffa_return(struct ffa_value ret, uint32_t func_id)
{
if (ffa_func_id(ret) == func_id) {
return true;
@@ -59,56 +74,36 @@ bool is_expected_ffa_return(smc_ret_values ret, uint32_t func_id)
return false;
}
-void fill_simd_vector_regs(const simd_vector_t v[SIMD_NUM_VECTORS])
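+/**
+ * Helper to check that a Cactus direct response carries the expected response
+ * code and argument.
+ */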
+bool is_expected_cactus_response(struct ffa_value ret, uint32_t expected_resp,
+ uint32_t arg)
{
-#ifdef __aarch64__
- __asm__ volatile(
- "ldp q0, q1, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q2, q3, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q4, q5, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q6, q7, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q8, q9, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q10, q11, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q12, q13, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q14, q15, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q16, q17, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q18, q19, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q20, q21, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q22, q23, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q24, q25, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q26, q27, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q28, q29, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "ldp q30, q31, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "sub %0, %0, #" STR(SIMD_NUM_VECTORS * SIMD_VECTOR_LEN_BYTES) ";"
- : : "r" (v));
-#endif
+ if (!is_ffa_direct_response(ret)) {
+ return false;
+ }
+
+ if (cactus_get_response(ret) != expected_resp ||
+ (uint32_t)ret.arg4 != arg) {
+ VERBOSE("Expected response %x and %x; "
+ "Obtained %x and %x\n",
+ expected_resp, arg, cactus_get_response(ret),
+ (int32_t)ret.arg4);
+ return false;
+ }
+
+ return true;
}
-void read_simd_vector_regs(simd_vector_t v[SIMD_NUM_VECTORS])
+void dump_ffa_value(struct ffa_value ret)
{
-#ifdef __aarch64__
- memset(v, 0, sizeof(simd_vector_t) * SIMD_NUM_VECTORS);
-
- __asm__ volatile(
- "stp q0, q1, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q2, q3, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q4, q5, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q6, q7, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q8, q9, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q10, q11, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q12, q13, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q14, q15, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q16, q17, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q18, q19, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q20, q21, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q22, q23, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q24, q25, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q26, q27, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q28, q29, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "stp q30, q31, [%0], #" STR(SIMD_TWO_VECTORS_BYTES_STR) ";"
- "sub %0, %0, #" STR(SIMD_NUM_VECTORS * SIMD_VECTOR_LEN_BYTES) ";"
- : : "r" (v));
-#endif
+ NOTICE("FF-A value: %lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx\n",
+ ret.fid,
+ ret.arg1,
+ ret.arg2,
+ ret.arg3,
+ ret.arg4,
+ ret.arg5,
+ ret.arg6,
+ ret.arg7);
}
/*
@@ -125,17 +120,17 @@ void read_simd_vector_regs(simd_vector_t v[SIMD_NUM_VECTORS])
bool check_spmc_execution_level(void)
{
unsigned int is_optee_spmc_criteria = 0U;
- smc_ret_values ret_values;
+ struct ffa_value ret_values;
/*
* Send a first OP-TEE-defined protocol message through
- * FFA direct message.
+ * FFA direct message. Expect it to implement either v1.0 or v1.1.
*/
ret_values = ffa_msg_send_direct_req32(HYP_ID, SP_ID(1),
OPTEE_FFA_GET_API_VERSION, 0,
0, 0, 0);
- if ((ret_values.ret3 == FFA_VERSION_MAJOR) &&
- (ret_values.ret4 == FFA_VERSION_MINOR)) {
+ if (ret_values.arg3 == 1 &&
+ (ret_values.arg4 == 0 || ret_values.arg4 == 1)) {
is_optee_spmc_criteria++;
}
@@ -146,8 +141,8 @@ bool check_spmc_execution_level(void)
ret_values = ffa_msg_send_direct_req32(HYP_ID, SP_ID(1),
OPTEE_FFA_GET_OS_VERSION,
0, 0, 0, 0);
- if ((ret_values.ret3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
- (ret_values.ret4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
+ if ((ret_values.arg3 == OPTEE_FFA_GET_OS_VERSION_MAJOR) &&
+ (ret_values.arg4 == OPTEE_FFA_GET_OS_VERSION_MINOR)) {
is_optee_spmc_criteria++;
}
@@ -155,30 +150,46 @@ bool check_spmc_execution_level(void)
}
static const struct ffa_features_test ffa_feature_test_target[] = {
- {"FFA_ERROR_32 check", FFA_ERROR, FFA_SUCCESS_SMC32},
- {"FFA_SUCCESS_32 check", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_INTERRUPT_32 check", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
- {"FFA_VERSION_32 check", FFA_VERSION, FFA_SUCCESS_SMC32},
- {"FFA_FEATURES_32 check", FFA_FEATURES, FFA_SUCCESS_SMC32},
- {"FFA_RX_RELEASE_32 check", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
- {"FFA_RXTX_MAP_32 check", FFA_RXTX_MAP_SMC32, FFA_ERROR},
- {"FFA_RXTX_MAP_64 check", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
- {"FFA_RXTX_UNMAP_32 check", FFA_RXTX_UNMAP, FFA_ERROR},
- {"FFA_PARTITION_INFO_GET_32 check", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
- {"FFA_ID_GET_32 check", FFA_ID_GET, FFA_SUCCESS_SMC32},
- {"FFA_MSG_POLL_32 check", FFA_MSG_POLL, FFA_SUCCESS_SMC32},
- {"FFA_MSG_WAIT_32 check", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
- {"FFA_YIELD_32 check", FFA_MSG_YIELD, FFA_SUCCESS_SMC32},
- {"FFA_RUN_32 check", FFA_MSG_RUN, FFA_SUCCESS_SMC32},
- {"FFA_MSG_SEND_32 check", FFA_MSG_SEND, FFA_SUCCESS_SMC32},
- {"FFA_MEM_DONATE_32 check", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_LEND_32 check", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_SHARE_32 check", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RETRIEVE_REQ_32 check", FFA_MEM_RETRIEVE_REQ_SMC32, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RETRIEVE_RESP_32 check", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RELINQUISH_32 check", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
- {"FFA_MEM_RECLAIM_32 check", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
- {"Check non-existent command", 0xFFFF, FFA_ERROR}
+ {"FFA_ERROR_32", FFA_ERROR, FFA_SUCCESS_SMC32},
+ {"FFA_SUCCESS_32", FFA_SUCCESS_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_INTERRUPT_32", FFA_INTERRUPT, FFA_SUCCESS_SMC32},
+ {"FFA_VERSION_32", FFA_VERSION, FFA_SUCCESS_SMC32},
+ {"FFA_FEATURES_32", FFA_FEATURES, FFA_SUCCESS_SMC32},
+ {"FFA_RX_RELEASE_32", FFA_RX_RELEASE, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_MAP_32", FFA_RXTX_MAP_SMC32, FFA_ERROR},
+ {"FFA_RXTX_MAP_64", FFA_RXTX_MAP_SMC64, FFA_SUCCESS_SMC32},
+ {"FFA_RXTX_UNMAP_32", FFA_RXTX_UNMAP, FFA_SUCCESS_SMC32},
+ {"FFA_PARTITION_INFO_GET_32", FFA_PARTITION_INFO_GET, FFA_SUCCESS_SMC32},
+ {"FFA_ID_GET_32", FFA_ID_GET, FFA_SUCCESS_SMC32},
+ {"FFA_SPM_ID_GET_32", FFA_SPM_ID_GET, FFA_SUCCESS_SMC32, 0,
+ MAKE_FFA_VERSION(1, 1)},
+ {"FFA_MSG_WAIT_32", FFA_MSG_WAIT, FFA_SUCCESS_SMC32},
+ {"FFA_RUN_32", FFA_RUN, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_DONATE_32", FFA_MEM_DONATE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_LEND_32", FFA_MEM_LEND_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_SHARE_32", FFA_MEM_SHARE_SMC32, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RETRIEVE_REQ_32", FFA_MEM_RETRIEVE_REQ_SMC32,
+ FFA_SUCCESS_SMC32, FFA_FEATURES_MEM_RETRIEVE_REQ_NS_SUPPORT},
+ {"FFA_MEM_RETRIEVE_RESP_32", FFA_MEM_RETRIEVE_RESP, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RELINQUISH_32", FFA_MEM_RELINQUISH, FFA_SUCCESS_SMC32},
+ {"FFA_MEM_RECLAIM_32", FFA_MEM_RECLAIM, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BITMAP_CREATE_32",
+ FFA_NOTIFICATION_BITMAP_CREATE, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BITMAP_DESTROY_32",
+ FFA_NOTIFICATION_BITMAP_DESTROY, FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_BIND_32", FFA_NOTIFICATION_BIND,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_UNBIND_32", FFA_NOTIFICATION_UNBIND,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_SET_32", FFA_NOTIFICATION_SET,
+ FFA_SUCCESS_SMC32},
+ {"FFA_NOTIFICATION_INFO_GET_64", FFA_NOTIFICATION_INFO_GET_SMC64,
+ FFA_SUCCESS_SMC32},
+ /* Indirect messaging is only supported in Nwd */
+ {"FFA_YIELD_32", FFA_MSG_YIELD, FFA_ERROR},
+ {"FFA_MSG_SEND_32", FFA_MSG_SEND, FFA_ERROR},
+ {"FFA_MSG_POLL_32", FFA_MSG_POLL, FFA_ERROR},
+ {"Check non-existent command", 0xFFFF, FFA_ERROR},
};
/*
@@ -199,10 +210,10 @@ unsigned int get_ffa_feature_test_target(
bool memory_retrieve(struct mailbox_buffers *mb,
struct ffa_memory_region **retrieved, uint64_t handle,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
- uint32_t mem_func)
+ ffa_id_t sender, struct ffa_memory_access receivers[],
+ uint32_t receiver_count, ffa_memory_region_flags_t flags)
{
- smc_ret_values ret;
+ struct ffa_value ret;
uint32_t fragment_size;
uint32_t total_size;
uint32_t descriptor_size;
@@ -212,24 +223,16 @@ bool memory_retrieve(struct mailbox_buffers *mb,
return false;
}
- /*
- * TODO: Revise shareability attribute in function call
- * below.
- * https://lists.trustedfirmware.org/pipermail/hafnium/2020-June/000023.html
- */
descriptor_size = ffa_memory_retrieve_request_init(
- mb->send, handle, sender, receiver, 0, 0,
- FFA_DATA_ACCESS_RW,
- FFA_INSTRUCTION_ACCESS_NX,
- FFA_MEMORY_NORMAL_MEM,
- FFA_MEMORY_CACHE_WRITE_BACK,
- FFA_MEMORY_OUTER_SHAREABLE);
+ mb->send, handle, sender, receivers, receiver_count, 0, flags,
+ FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE);
ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);
if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
- ERROR("Couldn't retrieve the memory page. Error: %x\n",
- ffa_error_code(ret));
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
return false;
}
@@ -242,8 +245,8 @@ bool memory_retrieve(struct mailbox_buffers *mb,
* successful ffa_mem_retrieve_req, total_size must be equal to
* fragment_size.
*/
- total_size = ret.ret1;
- fragment_size = ret.ret2;
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
if (total_size != fragment_size) {
ERROR("Only expect one memory segment to be sent!\n");
@@ -268,10 +271,148 @@ bool memory_retrieve(struct mailbox_buffers *mb,
return true;
}
+bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
+ void *out, uint32_t out_size)
+{
+ struct ffa_value ret;
+ uint32_t total_size;
+ uint32_t fragment_size;
+ uint32_t fragment_offset;
+ struct ffa_memory_region *region_out = out;
+
+ if (out == NULL || mb == NULL) {
+ ERROR("Invalid parameters!\n");
+ return false;
+ }
+
+ ffa_hypervisor_retrieve_request_init(mb->send, handle);
+ ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
+ sizeof(struct ffa_memory_region));
+
+ if (ffa_func_id(ret) != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ret));
+ return false;
+ }
+
+ /*
+ * Following total_size and fragment_size are useful to keep track
+ * of the state of transaction. When the sum of all fragment_size of all
+ * fragments is equal to total_size, the memory transaction has been
+ * completed.
+ */
+ total_size = ret.arg1;
+ fragment_size = ret.arg2;
+ fragment_offset = fragment_size;
+ VERBOSE("total_size=%d, fragment_size=%d, fragment_offset=%d\n",
+ total_size, fragment_size, fragment_offset);
+
+ if (fragment_size > PAGE_SIZE) {
+ ERROR("Fragment should be smaller than RX buffer!\n");
+ return false;
+ }
+ if (total_size > out_size) {
+ ERROR("output buffer is not large enough to store all "
+ "fragments (total_size=%d, max_size=%d)\n",
+ total_size, out_size);
+ return false;
+ }
+
+ /*
+ * Copy the received message to the out buffer. This is necessary
+ * because `mb->recv` will be overwritten if sending a fragmented
+ * message.
+ */
+ memcpy(out, mb->recv, fragment_size);
+
+ if (region_out->receiver_count == 0) {
+ VERBOSE("copied region has no recivers\n");
+ return false;
+ }
+
+ if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing operations support max of %u "
+ "receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
+ return false;
+ }
+
+ while (fragment_offset < total_size) {
+ VERBOSE("Calling again. frag offset: %d; total: %d\n",
+ fragment_offset, total_size);
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ ret = ffa_mem_frag_rx(handle, fragment_offset);
+ if (ret.fid != FFA_MEM_FRAG_TX) {
+ ERROR("ffa_mem_frag_rx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (ffa_frag_handle(ret) != handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, "
+ "got "
+ "%llu\n",
+ __func__, handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ /* Sender MBZ at physical instance. */
+ if (ffa_frag_sender(ret) != 0) {
+ ERROR("%s: fragment sender mismatch: expected %d, got "
+ "%d\n",
+ __func__, 0, ffa_frag_sender(ret));
+ return false;
+ }
+
+ fragment_size = ret.arg2;
+ if (fragment_size == 0) {
+ ERROR("%s: fragment size must not be 0\n", __func__);
+ return false;
+ }
+
+ if (fragment_offset + fragment_size > out_size) {
+ ERROR("%s: fragment is too big to fit in out buffer "
+ "(%d > %d)\n",
+ __func__, fragment_offset + fragment_size,
+ out_size);
+ return false;
+ }
+
+ VERBOSE("copying fragment at offset %d with size %d\n",
+ fragment_offset, fragment_size);
+ memcpy((uint8_t *)out + fragment_offset, mb->recv,
+ fragment_size);
+
+ fragment_offset += fragment_size;
+ }
+
+ if (fragment_offset != total_size) {
+ ERROR("%s: fragment size mismatch: expected %d, got %d\n",
+ __func__, total_size, fragment_offset);
+ return false;
+ }
+
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
+ return false;
+ }
+
+ VERBOSE("Memory Retrieved!\n");
+
+ return true;
+}
+
bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
- ffa_vm_id_t id)
+ ffa_id_t id)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ffa_mem_relinquish_init(m, handle, 0, id);
ret = ffa_mem_relinquish();
@@ -285,48 +426,135 @@ bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
return true;
}
+bool send_fragmented_memory_region(
+ void *send_buffer,
+ const struct ffa_memory_region_constituent constituents[],
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t sent_length, uint32_t total_length, bool allocator_is_spmc,
+ struct ffa_value ret)
+{
+
+ uint64_t handle;
+ uint64_t handle_mask;
+ uint64_t expected_handle_mask =
+ allocator_is_spmc ? FFA_MEMORY_HANDLE_ALLOCATOR_SPMC
+ : FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
+ ffa_memory_handle_t fragment_handle = FFA_MEMORY_HANDLE_INVALID;
+ uint32_t fragment_length;
+
+ /* Send the remaining fragments. */
+ while (remaining_constituent_count != 0) {
+ VERBOSE("%s: %d constituents left to send.\n", __func__,
+ remaining_constituent_count);
+ if (ret.fid != FFA_MEM_FRAG_RX) {
+ ERROR("ffa_mem_frax_tx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (fragment_handle == FFA_MEMORY_HANDLE_INVALID) {
+ fragment_handle = ffa_frag_handle(ret);
+ } else if (ffa_frag_handle(ret) != fragment_handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, "
+ "got %llu\n",
+ __func__, fragment_handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ if (ret.arg3 != sent_length) {
+ ERROR("%s: fragment length mismatch: expected %u, got "
+ "%lu\n",
+ __func__, sent_length, ret.arg3);
+ return false;
+ }
+
+ remaining_constituent_count = ffa_memory_fragment_init(
+ send_buffer, PAGE_SIZE,
+ constituents + constituent_count -
+ remaining_constituent_count,
+ remaining_constituent_count, &fragment_length);
+
+ ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+ sent_length += fragment_length;
+ }
+
+ if (sent_length != total_length) {
+ ERROR("%s: fragment length mismatch: expected %u, got %u\n",
+ __func__, total_length, sent_length);
+ return false;
+ }
+
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("%s: ffa_mem_frax_tx() failed: %d\n", __func__,
+ ffa_error_code(ret));
+ return false;
+ }
+
+ handle = ffa_mem_success_handle(ret);
+ handle_mask = (handle >> FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT) &
+ FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
+
+ if (handle_mask != expected_handle_mask) {
+ ERROR("%s: handle mask mismatch: expected %llu, got %llu\n",
+ __func__, expected_handle_mask, handle_mask);
+ return false;
+ }
+
+ if (fragment_handle != FFA_MEMORY_HANDLE_INVALID && handle != fragment_handle) {
+ ERROR("%s: fragment handle mismatch: expectd %d, got %llu\n",
+ __func__, fragment_length, handle);
+ return false;
+ }
+
+ return true;
+}
+
/**
* Helper to call memory send function whose func id is passed as a parameter.
- * Returns a valid handle in case of successful operation or
- * FFA_MEMORY_HANDLE_INVALID if something goes wrong.
- *
- * TODO: Do memory send with 'ffa_memory_region' taking multiple segments
*/
ffa_memory_handle_t memory_send(
- struct ffa_memory_region *memory_region, uint32_t mem_func,
- uint32_t fragment_length, uint32_t total_length)
+ void *send_buffer, uint32_t mem_func,
+ const struct ffa_memory_region_constituent *constituents,
+ uint32_t constituent_count, uint32_t remaining_constituent_count,
+ uint32_t fragment_length, uint32_t total_length,
+ struct ffa_value *ret)
{
- smc_ret_values ret;
- ffa_vm_id_t receiver =
- memory_region->receivers[0].receiver_permissions.receiver;
-
- if (fragment_length != total_length) {
- ERROR("For now, fragment_length and total_length need to be"
- " equal");
+ if (remaining_constituent_count == 0 && fragment_length != total_length) {
+ ERROR("%s: fragment_length and total_length need "
+ "to be equal (fragment_length = %d, total_length = %d)\n",
+ __func__, fragment_length, total_length);
return FFA_MEMORY_HANDLE_INVALID;
}
switch (mem_func) {
case FFA_MEM_SHARE_SMC32:
- ret = ffa_mem_share(total_length, fragment_length);
+ *ret = ffa_mem_share(total_length, fragment_length);
break;
case FFA_MEM_LEND_SMC32:
- ret = ffa_mem_lend(total_length, fragment_length);
+ *ret = ffa_mem_lend(total_length, fragment_length);
break;
case FFA_MEM_DONATE_SMC32:
- ret = ffa_mem_donate(total_length, fragment_length);
+ *ret = ffa_mem_donate(total_length, fragment_length);
break;
default:
- ERROR("TFTF - Invalid func id %x!\n", mem_func);
+ ERROR("%s: Invalid func id %x!\n", __func__, mem_func);
return FFA_MEMORY_HANDLE_INVALID;
}
- if (is_ffa_call_error(ret)) {
- ERROR("Failed to send message to: %x\n", receiver);
+ if (is_ffa_call_error(*ret)) {
+ VERBOSE("%s: Failed to send memory: %d\n", __func__,
+ ffa_error_code(*ret));
+ return FFA_MEMORY_HANDLE_INVALID;
+ }
+
+ if (!send_fragmented_memory_region(
+ send_buffer, constituents, constituent_count,
+ remaining_constituent_count, fragment_length, total_length,
+ true, *ret)) {
return FFA_MEMORY_HANDLE_INVALID;
}
- return ffa_mem_success_handle(ret);
+ return ffa_mem_success_handle(*ret);
}
/**
@@ -335,36 +563,270 @@ ffa_memory_handle_t memory_send(
* doing it in this file for simplicity and for testing purposes.
*/
ffa_memory_handle_t memory_init_and_send(
- struct ffa_memory_region *memory_region, size_t memory_region_max_size,
- ffa_vm_id_t sender, ffa_vm_id_t receiver,
+ void *send_buffer, size_t memory_region_max_size, ffa_id_t sender,
+ struct ffa_memory_access receivers[], uint32_t receiver_count,
const struct ffa_memory_region_constituent *constituents,
- uint32_t constituents_count, uint32_t mem_func)
+ uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret)
{
uint32_t remaining_constituent_count;
uint32_t total_length;
uint32_t fragment_length;
- enum ffa_data_access data_access = (mem_func == FFA_MEM_DONATE_SMC32) ?
- FFA_DATA_ACCESS_NOT_SPECIFIED :
- FFA_DATA_ACCESS_RW;
+ enum ffa_memory_type type =
+ (receiver_count == 1 && mem_func != FFA_MEM_SHARE_SMC32)
+ ? FFA_MEMORY_NOT_SPECIFIED_MEM
+ : FFA_MEMORY_NORMAL_MEM;
remaining_constituent_count = ffa_memory_region_init(
- memory_region, memory_region_max_size, sender, receiver, constituents,
- constituents_count, 0, 0, data_access,
- FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
- FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
- FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length
- );
+ send_buffer, memory_region_max_size, sender, receivers,
+ receiver_count, constituents, constituents_count, 0, 0, type,
+ FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
+ &total_length, &fragment_length);
+
+ return memory_send(send_buffer, mem_func, constituents,
+ constituents_count, remaining_constituent_count,
+ fragment_length, total_length, ret);
+}
+
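+/*
+ * Minimal usage sketch for memory_init_and_send() (illustrative only; 'mb',
+ * 'share_page' and the receiver ID are placeholders, mirroring the tests in
+ * this patch):
+ *
+ *   struct ffa_memory_region_constituent constituents[] = {
+ *           {(void *)share_page, 1, 0}
+ *   };
+ *   struct ffa_memory_access receiver =
+ *           ffa_memory_access_init_permissions_from_mem_func(
+ *                   SP_ID(1), FFA_MEM_SHARE_SMC32);
+ *   struct ffa_value ret;
+ *   ffa_memory_handle_t handle = memory_init_and_send(
+ *           mb.send, PAGE_SIZE, HYP_ID, &receiver, 1, constituents, 1,
+ *           FFA_MEM_SHARE_SMC32, &ret);
+ *
+ *   if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ *           return TEST_RESULT_FAIL;
+ *   }
+ */
+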
+static bool ffa_uuid_equal(const struct ffa_uuid uuid1,
+ const struct ffa_uuid uuid2)
+{
+ return (uuid1.uuid[0] == uuid2.uuid[0]) &&
+ (uuid1.uuid[1] == uuid2.uuid[1]) &&
+ (uuid1.uuid[2] == uuid2.uuid[2]) &&
+ (uuid1.uuid[3] == uuid2.uuid[3]);
+}
+static bool ffa_partition_info_regs_get_part_info(
+ struct ffa_value *args, uint8_t idx,
+ struct ffa_partition_info *partition_info)
+{
/*
- * For simplicity of the test, and at least for the time being,
- * the following condition needs to be true.
+ * Layout of the return value: arg0 encodes the FF-A function ID, arg1 is
+ * reserved, arg2 encodes the indices, and arg3 onwards carry the
+ * partition information entries.
*/
- if (remaining_constituent_count != 0U) {
- ERROR("Remaining constituent should be 0\n");
- return FFA_MEMORY_HANDLE_INVALID;
+ uint64_t *arg_ptrs = (uint64_t *)args + ((idx * 3) + 3);
+ uint64_t info, uuid_lo, uuid_high;
+
+ /*
+ * Each partition's information is encoded in 3 registers, so there can
+ * be at most 5 entries.
+ */
+ if (idx >= 5 || !partition_info) {
+ return false;
+ }
+
+ info = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_lo = *arg_ptrs;
+
+ arg_ptrs++;
+ uuid_high = *arg_ptrs;
+
+ /*
+ * As defined in FF-A 1.2 ALP0, 14.9 FFA_PARTITION_INFO_GET_REGS.
+ */
+ partition_info->id = info & 0xFFFFU;
+ partition_info->exec_context = (info >> 16) & 0xFFFFU;
+ partition_info->properties = (info >> 32);
+ partition_info->uuid.uuid[0] = uuid_lo & 0xFFFFFFFFU;
+ partition_info->uuid.uuid[1] = (uuid_lo >> 32) & 0xFFFFFFFFU;
+ partition_info->uuid.uuid[2] = uuid_high & 0xFFFFFFFFU;
+ partition_info->uuid.uuid[3] = (uuid_high >> 32) & 0xFFFFFFFFU;
+
+ return true;
+}
+
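+/*
+ * Layout sketch for the encoding parsed above (assuming struct ffa_value
+ * holds the SMC result registers x0..x17 as contiguous 64-bit values):
+ *
+ *   idx 0 -> x3 (id/context/properties), x4 (uuid lo), x5 (uuid hi)
+ *   idx 1 -> x6, x7, x8
+ *   ...
+ *   idx 4 -> x15, x16, x17
+ *
+ * hence the (idx * 3) + 3 offset and the limit of 5 entries per call.
+ */
+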
+static bool ffa_compare_partition_info(
+ const struct ffa_uuid uuid,
+ const struct ffa_partition_info *info,
+ const struct ffa_partition_info *expected)
+{
+ bool result = true;
+ /*
+ * If a UUID is specified then the UUID returned in the
+ * partition info descriptor must be zero (MBZ).
+ */
+ struct ffa_uuid expected_uuid =
+ ffa_uuid_equal(uuid, NULL_UUID) ? expected->uuid : NULL_UUID;
+
+ if (info->id != expected->id) {
+ ERROR("Wrong ID. Expected %x, got %x\n", expected->id, info->id);
+ result = false;
+ }
+
+ if (info->exec_context != expected->exec_context) {
+ ERROR("Wrong context. Expected %d, got %d\n",
+ expected->exec_context,
+ info->exec_context);
+ result = false;
+ }
+ if (info->properties != expected->properties) {
+ ERROR("Wrong properties. Expected %d, got %d\n",
+ expected->properties,
+ info->properties);
+ result = false;
+ }
+
+ if (!ffa_uuid_equal(info->uuid, expected_uuid)) {
+ ERROR("Wrong UUID. Expected %x %x %x %x, "
+ "got %x %x %x %x\n",
+ expected_uuid.uuid[0],
+ expected_uuid.uuid[1],
+ expected_uuid.uuid[2],
+ expected_uuid.uuid[3],
+ info->uuid.uuid[0],
+ info->uuid.uuid[1],
+ info->uuid.uuid[2],
+ info->uuid.uuid[3]);
+ result = false;
+ }
+
+ return result;
+}
+
+/**
+ * Sends an FFA_PARTITION_INFO_GET_REGS request and validates the partition
+ * information returned in registers against the expected entries provided
+ * by the caller.
+ */
+bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size)
+{
+ /*
+ * TODO: For now, support only one invocation. Can be enhanced easily
+ * to extend to arbitrary number of partitions.
+ */
+ if (expected_size > 5) {
+ ERROR("%s only supports information received in"
+ " one invocation of the ABI (5 partitions)\n",
+ __func__);
+ return false;
+ }
+
+ struct ffa_value ret = ffa_partition_info_get_regs(uuid, 0, 0);
+
+ if (ffa_func_id(ret) != FFA_SUCCESS_SMC64) {
+ return false;
+ }
+
+ if (ffa_partition_info_regs_partition_count(ret) !=
+ expected_size) {
+ ERROR("Unexpected number of partitions %d (expected %d)\n",
+ ffa_partition_info_regs_partition_count(ret),
+ expected_size);
+ return false;
+ }
+
+ if (ffa_partition_info_regs_entry_size(ret) !=
+ sizeof(struct ffa_partition_info)) {
+ ERROR("Unexpected partition info descriptor size %d\n",
+ ffa_partition_info_regs_entry_size(ret));
+ return false;
+ }
+
+ for (unsigned int i = 0U; i < expected_size; i++) {
+ struct ffa_partition_info info = { 0 };
+
+ ffa_partition_info_regs_get_part_info(&ret, i, &info);
+ if (!ffa_compare_partition_info(uuid, &info, &expected[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
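+/*
+ * Typical call site (illustrative sketch; the UUID, ID and field values below
+ * are placeholders):
+ *
+ *   const struct ffa_uuid uuid = {PRIMARY_UUID};
+ *   const struct ffa_partition_info expected[] = {
+ *           { .id = SP_ID(1), .exec_context = PLATFORM_CORE_COUNT,
+ *             .properties = 0, .uuid = {{0}} },
+ *   };
+ *
+ *   if (!ffa_partition_info_regs_helper(uuid, expected, 1)) {
+ *           return TEST_RESULT_FAIL;
+ *   }
+ */
+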
+/**
+ * Sends an FFA_PARTITION_INFO_GET request and checks the response against
+ * the expected partition information.
+ */
+bool ffa_partition_info_helper(struct mailbox_buffers *mb,
+ const struct ffa_uuid uuid,
+ const struct ffa_partition_info *expected,
+ const uint16_t expected_size)
+{
+ bool result = true;
+ struct ffa_value ret = ffa_partition_info_get(uuid);
+
+ if (ffa_func_id(ret) == FFA_SUCCESS_SMC32) {
+ if (ffa_partition_info_count(ret) != expected_size) {
+ ERROR("Unexpected number of partitions %d\n",
+ ffa_partition_info_count(ret));
+ return false;
+ }
+ if (ffa_partition_info_desc_size(ret) !=
+ sizeof(struct ffa_partition_info)) {
+ ERROR("Unexpected partition info descriptor size %d\n",
+ ffa_partition_info_desc_size(ret));
+ return false;
+ }
+ const struct ffa_partition_info *info =
+ (const struct ffa_partition_info *)(mb->recv);
+
+ for (unsigned int i = 0U; i < expected_size; i++) {
+ if (!ffa_compare_partition_info(uuid, &info[i], &expected[i]))
+ result = false;
+ }
+ }
+
+ ret = ffa_rx_release();
+ if (is_ffa_call_error(ret)) {
+ ERROR("Failed to release RX buffer\n");
+ result = false;
+ }
+ return result;
+}
+
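+/*
+ * Unlike the _regs variant above, this helper reads the descriptors from the
+ * RX buffer and releases it afterwards, so a typical call site maps the
+ * mailbox first (illustrative sketch; 'uuid' and 'expected' are placeholders
+ * as in the example above):
+ *
+ *   struct mailbox_buffers mb;
+ *   GET_TFTF_MAILBOX(mb);
+ *
+ *   if (!ffa_partition_info_helper(&mb, uuid, expected, 1)) {
+ *           return TEST_RESULT_FAIL;
+ *   }
+ */
+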
+static bool configure_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest,
+ bool enable)
+{
+ struct ffa_value ret_values;
+
+ ret_values = cactus_interrupt_cmd(source, dest, IRQ_TWDOG_INTID,
+ enable, INTERRUPT_TYPE_IRQ);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while configuring"
+ " TWDOG interrupt\n");
+ return false;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ ERROR("Failed to configure Trusted Watchdog interrupt\n");
+ return false;
}
+ return true;
+}
+
+bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest)
+{
+ return configure_trusted_wdog_interrupt(source, dest, true);
+}
+
+bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest)
+{
+ return configure_trusted_wdog_interrupt(source, dest, false);
+}
+
+/**
+ * Initializes receiver permissions in a memory transaction descriptor, using
+ * `mem_func` to determine the appropriate permissions.
+ */
+struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
+ ffa_id_t receiver_id, uint32_t mem_func)
+{
+ enum ffa_instruction_access instruction_access =
+ FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED;
+ enum ffa_data_access data_access =
+ (mem_func == FFA_MEM_DONATE_SMC32)
+ ? FFA_DATA_ACCESS_NOT_SPECIFIED
+ : FFA_DATA_ACCESS_RW;
- return memory_send(memory_region, mem_func, fragment_length,
- total_length);
+ return ffa_memory_access_init(receiver_id, data_access,
+ instruction_access, 0, NULL);
}
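+
+/*
+ * Resulting permissions, as set above: FFA_MEM_DONATE_SMC32 leaves the data
+ * access unspecified, while FFA_MEM_SHARE_SMC32 and FFA_MEM_LEND_SMC32
+ * request read-write data access. Instruction access is always left
+ * unspecified.
+ */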
diff --git a/tftf/tests/runtime_services/secure_service/spm_test_helpers.c b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
new file mode 100644
index 000000000..054e774a7
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/spm_test_helpers.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+
+#include <power_management.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+static struct mailbox_buffers test_mb = {.send = NULL, .recv = NULL};
+
+bool reset_tftf_mailbox(void)
+{
+ if (is_ffa_call_error(ffa_rxtx_unmap())) {
+ return false;
+ }
+
+ test_mb.send = NULL;
+ test_mb.recv = NULL;
+
+ return true;
+}
+
+bool get_tftf_mailbox(struct mailbox_buffers *mb)
+{
+ struct ffa_value ret;
+
+ if (test_mb.recv == NULL || test_mb.send == NULL) {
+ CONFIGURE_AND_MAP_MAILBOX(test_mb, PAGE_SIZE, ret);
+ if (is_ffa_call_error(ret)) {
+ return false;
+ }
+ }
+
+ *mb = test_mb;
+
+ return true;
+}
+
+test_result_t check_spmc_testing_set_up(
+ uint32_t ffa_version_major, uint32_t ffa_version_minor,
+ const struct ffa_uuid *ffa_uuids, size_t ffa_uuids_size)
+{
+ struct mailbox_buffers mb;
+
+ if (ffa_uuids == NULL) {
+ ERROR("Invalid parameter ffa_uuids!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(ffa_version_major,
+ ffa_version_minor);
+
+ /**********************************************************************
+ * If OP-TEE is the SPMC, skip the current test.
+ **********************************************************************/
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ GET_TFTF_MAILBOX(mb);
+
+ for (unsigned int i = 0U; i < ffa_uuids_size; i++)
+ SKIP_TEST_IF_FFA_ENDPOINT_NOT_DEPLOYED(*mb, ffa_uuids[i]);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t spm_run_multi_core_test(uintptr_t cpu_on_handler,
+ event_t *cpu_done)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos, cpu_node, mpidr;
+ int32_t ret;
+
+ VERBOSE("Powering on all cpus.\n");
+
+ for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ tftf_init_event(&cpu_done[i]);
+ }
+
+ /* Power on each secondary CPU one after the other. */
+ for_each_cpu(cpu_node) {
+ mpidr = tftf_get_mpidr_from_node(cpu_node);
+ if (mpidr == lead_mpid) {
+ continue;
+ }
+
+ ret = tftf_cpu_on(mpidr, cpu_on_handler, 0U);
+ if (ret != 0) {
+ ERROR("tftf_cpu_on mpidr 0x%x returns %d\n",
+ mpidr, ret);
+ }
+
+ /* Wait for the secondary CPU to be ready. */
+ core_pos = platform_get_core_pos(mpidr);
+ tftf_wait_for_event(&cpu_done[core_pos]);
+ }
+
+ VERBOSE("Done exiting.\n");
+
+ return TEST_RESULT_SUCCESS;
+}
+
+bool spm_core_sp_init(ffa_id_t sp_id)
+{
+ /*
+ * Secure Partitions secondary ECs need one round of ffa_run to reach
+ * the message loop.
+ */
+ if (sp_id != SP_ID(1)) {
+ uint32_t core_pos = get_current_core_id();
+ struct ffa_value ret = ffa_run(sp_id, core_pos);
+
+ if (ffa_func_id(ret) != FFA_MSG_WAIT) {
+ ERROR("Failed to run SP%x on core %u\n",
+ sp_id, core_pos);
+ return false;
+ }
+ }
+
+ return true;
+}
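+
+/*
+ * Typical use (illustrative sketch, assuming a test-defined handler): each
+ * secondary CPU handler initialises the target SP's execution context before
+ * messaging it.
+ *
+ *   static test_result_t cpu_on_handler(void)
+ *   {
+ *           if (!spm_core_sp_init(SP_ID(2))) {
+ *                   return TEST_RESULT_FAIL;
+ *           }
+ *
+ *           // ... send direct requests to SP_ID(2) from this core ...
+ *           return TEST_RESULT_SUCCESS;
+ *   }
+ */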
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
index 0a722e497..1f8e81c1e 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_direct_messaging.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -14,6 +14,7 @@
#include <lib/events.h>
#include <lib/power_management.h>
#include <platform.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#define ECHO_VAL1 U(0xa0a0a0a0)
@@ -24,14 +25,13 @@ static const struct ffa_uuid expected_sp_uuids[] = {
{PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
-
static event_t cpu_booted[PLATFORM_CORE_COUNT];
-static test_result_t send_cactus_echo_cmd(ffa_vm_id_t sender,
- ffa_vm_id_t dest,
+static test_result_t send_cactus_echo_cmd(ffa_id_t sender,
+ ffa_id_t dest,
uint64_t value)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ret = cactus_echo_send_cmd(sender, dest, value);
/*
@@ -92,12 +92,12 @@ test_result_t test_ffa_direct_messaging(void)
* otherwise.
* For the CACTUS_SUCCESS response, the test returns TEST_RESULT_SUCCESS.
*/
-static test_result_t send_cactus_req_echo_cmd(ffa_vm_id_t sender,
- ffa_vm_id_t dest,
- ffa_vm_id_t echo_dest,
+static test_result_t send_cactus_req_echo_cmd(ffa_id_t sender,
+ ffa_id_t dest,
+ ffa_id_t echo_dest,
uint64_t value)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ret = cactus_req_echo_send_cmd(sender, dest, echo_dest, value);
@@ -128,13 +128,13 @@ test_result_t test_ffa_sp_to_sp_direct_messaging(void)
 * The following tests are intended to test the handling of a
 * direct message request with a VM's ID as the sender.
*/
- result = send_cactus_req_echo_cmd(HYP_ID + 1, SP_ID(2), SP_ID(3),
+ result = send_cactus_req_echo_cmd(VM_ID(1), SP_ID(2), SP_ID(3),
ECHO_VAL2);
if (result != TEST_RESULT_SUCCESS) {
return result;
}
- result = send_cactus_req_echo_cmd(HYP_ID + 2, SP_ID(3), SP_ID(1),
+ result = send_cactus_req_echo_cmd(VM_ID(2), SP_ID(3), SP_ID(1),
ECHO_VAL3);
return result;
@@ -142,7 +142,7 @@ test_result_t test_ffa_sp_to_sp_direct_messaging(void)
test_result_t test_ffa_sp_to_sp_deadlock(void)
{
- smc_ret_values ret;
+ struct ffa_value ret;
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
@@ -165,14 +165,12 @@ test_result_t test_ffa_sp_to_sp_deadlock(void)
/**
* Handler that is passed during tftf_cpu_on to individual CPU cores.
* Runs a specific core and send a direct message request.
- * Expects core_pos | SP_ID as a response.
*/
static test_result_t cpu_on_handler(void)
{
- unsigned int mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int core_pos = get_current_core_id();
test_result_t ret = TEST_RESULT_SUCCESS;
- smc_ret_values ffa_ret;
+ struct ffa_value ffa_ret;
/*
* Send a direct message request to SP1 (MP SP) from current physical
@@ -255,48 +253,9 @@ out:
*/
test_result_t test_ffa_secondary_core_direct_msg(void)
{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos, cpu_node, mpidr;
- int32_t ret;
-
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
-
- for (unsigned int i = 0U; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_booted[i]);
- }
-
- for_each_cpu(cpu_node) {
- mpidr = tftf_get_mpidr_from_node(cpu_node);
- if (mpidr == lead_mpid) {
- continue;
- }
-
- ret = tftf_cpu_on(mpidr, (uintptr_t)cpu_on_handler, 0U);
- if (ret != 0) {
- ERROR("tftf_cpu_on mpidr 0x%x returns %d\n", mpidr, ret);
- }
- }
-
- VERBOSE("Waiting secondary CPUs to turn off ...\n");
-
- for_each_cpu(cpu_node) {
- mpidr = tftf_get_mpidr_from_node(cpu_node);
- if (mpidr == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(mpidr);
- tftf_wait_for_event(&cpu_booted[core_pos]);
- }
-
- VERBOSE("Done exiting.\n");
-
- /**********************************************************************
- * All tests passed.
- **********************************************************************/
-
- return TEST_RESULT_SUCCESS;
+ return spm_run_multi_core_test((uintptr_t)cpu_on_handler, cpu_booted);
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
new file mode 100644
index 000000000..0a345d4b2
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_exceptions.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2022-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include "ffa_helpers.h"
+#include <debug.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <host_realm_helper.h>
+#include <irq.h>
+#include <platform.h>
+#include <smccc.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static __aligned(PAGE_SIZE) uint64_t share_page[PAGE_SIZE / sizeof(uint64_t)];
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
+/**
+ * @Test_Aim@ Check a realm region cannot be accessed from a secure partition.
+ *
+ * This test shares a TFTF-allocated buffer with a secure partition through an
+ * FF-A memory sharing operation. The buffer is initially marked NS in the GPT
+ * and transitioned to Realm after sharing. The SP is then invoked to retrieve
+ * the region (mapping it into its S2 translation regime) and to map it into
+ * its secure S1 translation regime. It then attempts a read access, which
+ * results in the PE triggering a GPF that is caught by a custom synchronous
+ * abort handler.
+ *
+ */
+test_result_t rl_memory_cannot_be_accessed_in_s(void)
+{
+ struct ffa_memory_region_constituent constituents[] = {
+ {
+ (void *)share_page, 1, 0
+ }
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ ffa_memory_handle_t handle;
+ struct mailbox_buffers mb;
+ struct ffa_value ret;
+ u_register_t retmm;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_SHARE_SMC32);
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ handle = memory_init_and_send(mb.send, PAGE_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ FFA_MEM_SHARE_SMC32, &ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("TFTF - Handle: %llx Address: %p\n",
+ handle, constituents[0].address);
+
+ host_rmi_init_cmp_result();
+
+ /* Delegate the shared page to Realm. */
+ retmm = host_rmi_granule_delegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("Granule delegate failed, ret=0x%lx\n", retmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Retrieve the shared page and attempt accessing it.
+ * Tell SP to expect an exception.
+ */
+ ret = cactus_mem_send_cmd(SENDER, RECEIVER, FFA_MEM_SHARE_SMC32,
+ handle, 0, 1, true);
+
+ /* Undelegate the shared page. */
+ retmm = host_rmi_granule_undelegate((u_register_t)&share_page);
+ if (retmm != 0UL) {
+ ERROR("Granule undelegate failed, ret=0x%lx\n", retmm);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
+ ERROR("Memory reclaim failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Expect a success response with value 1, indicating an exception
+ * was triggered while the SP accessed the region.
+ */
+ if (!(cactus_get_response(ret) == CACTUS_SUCCESS &&
+ cactus_error_code(ret) == 1)) {
+ ERROR("Exceptions test failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return host_cmp_result();
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_features.c b/tftf/tests/runtime_services/secure_service/test_ffa_features.c
deleted file mode 100644
index e4cd845fd..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_features.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <spm_common.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-test_result_t test_ffa_features(void)
-{
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
-
- /* Check if SPMC is OP-TEE at S-EL1 */
- if (check_spmc_execution_level()) {
- /* FFA_FEATURES is not yet supported in OP-TEE */
- return TEST_RESULT_SUCCESS;
- }
-
- smc_ret_values ffa_ret;
- const struct ffa_features_test *ffa_feature_test_target;
- unsigned int i, test_target_size =
- get_ffa_feature_test_target(&ffa_feature_test_target);
-
- for (i = 0U; i < test_target_size; i++) {
- ffa_ret = ffa_features(ffa_feature_test_target[i].feature);
- if (ffa_func_id(ffa_ret) != ffa_feature_test_target[i].expected_ret) {
- tftf_testcase_printf("%s returned %x, expected %x\n",
- ffa_feature_test_target[i].test_name,
- ffa_func_id(ffa_ret),
- ffa_feature_test_target[i].expected_ret);
- return TEST_RESULT_FAIL;
- }
- if ((ffa_feature_test_target[i].expected_ret == FFA_ERROR) &&
- (ffa_error_code(ffa_ret) != FFA_ERROR_NOT_SUPPORTED)) {
- tftf_testcase_printf("%s failed for the wrong reason: "
- "returned %x, expected %x\n",
- ffa_feature_test_target[i].test_name,
- ffa_error_code(ffa_ret),
- FFA_ERROR_NOT_SUPPORTED);
- return TEST_RESULT_FAIL;
- }
- }
-
- return TEST_RESULT_SUCCESS;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c
new file mode 100644
index 000000000..c6c719428
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_group0_interrupts.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <platform.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+#define SP_SLEEP_TIME 200U
+#define NS_TIME_SLEEP 200U
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+
+test_result_t test_ffa_group0_interrupt_sp_running(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Send request to first Cactus SP to sleep for 200ms.*/
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SP_SLEEP_TIME);
+
+ /*
+ * SBSA secure watchdog timer fires every 100ms. Hence a Group0 secure
+ * interrupt should trigger during this time.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for sleep command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_ffa_group0_interrupt_in_nwd(void)
+{
+ uint64_t time1;
+ volatile uint64_t time2, time_lapsed;
+ uint64_t timer_freq = read_cntfrq_el0();
+
+ time1 = syscounter_read();
+
+ /*
+ * Sleep for NS_TIME_SLEEP ms. This ensures SBSA secure wdog timer
+ * triggers during this time.
+ */
+ waitms(NS_TIME_SLEEP);
+ time2 = syscounter_read();
+
+ /* Elapsed time should be at least equal to the sleep time. */
+ time_lapsed = ((time2 - time1) * 1000) / timer_freq;
+
+ if (time_lapsed < NS_TIME_SLEEP) {
+ ERROR("Time elapsed less than expected value: %llu vs %u\n",
+ time_lapsed, NS_TIME_SLEEP);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
index 7c70de2c3..454ea0570 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_interrupts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,6 +7,7 @@
#include <cactus_test_cmds.h>
#include <ffa_endpoints.h>
#include <ffa_helpers.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#include <timer.h>
@@ -14,10 +15,14 @@ static volatile int timer_irq_received;
#define SENDER HYP_ID
#define RECEIVER SP_ID(1)
-#define SLEEP_TIME 200U
+#define RECEIVER_2 SP_ID(2)
+#define RECEIVER_3 SP_ID(3)
+#define TIMER_DURATION 50U
+#define SLEEP_TIME 100U
+#define SLEEP_TIME_FWD 200U
static const struct ffa_uuid expected_sp_uuids[] = {
- {PRIMARY_UUID}
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
/*
@@ -31,69 +36,392 @@ static int timer_handler(void *data)
return 0;
}
+static int program_timer(unsigned long milli_secs)
+{
+ /* Program timer. */
+ timer_irq_received = 0;
+ tftf_timer_register_handler(timer_handler);
+
+ return tftf_program_timer(milli_secs);
+}
+
+static int check_timer_interrupt(void)
+{
+ /* Check that the timer interrupt has been handled in NWd(TFTF). */
+ tftf_cancel_timer();
+ tftf_timer_unregister_handler();
+
+ return timer_irq_received;
+}
+
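+/*
+ * The tests below follow the same pattern (sketch): arm the timer, drive the
+ * SP, then verify the interrupt was actually handled in the normal world.
+ *
+ *   if (program_timer(TIMER_DURATION) < 0) {
+ *           return TEST_RESULT_FAIL;
+ *   }
+ *   ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+ *   if (check_timer_interrupt() == 0) {
+ *           return TEST_RESULT_FAIL;
+ *   }
+ */
+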
/*
- * @Test_Aim@ Test non-secure interrupts while executing Secure Partition.
+ * @Test_Aim@ Test non-secure interrupts while a Secure Partition capable
+ * of managed exit is executing.
*
- * 1. Enable managed exit interrupt by sending interrupt_enable command to
- * Cactus.
- *
- * 2. Register a handler for the non-secure timer interrupt. Program it to fire
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
* in a certain time.
*
- * 3. Send a blocking request to Cactus to execute in busy loop.
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
*
- * 4. While executing in busy loop, the non-secure timer should
+ * 3. While executing in busy loop, the non-secure timer should
* fire and trap into SPM running at S-EL2 as FIQ.
*
- * 5. SPM injects a managed exit virtual FIQ into Cactus (as configured in the
+ * 4. SPM injects a managed exit virtual FIQ into Cactus (as configured in the
* interrupt enable call), causing it to run its interrupt handler.
*
- * 6. Cactus's managed exit handler acknowledges interrupt arrival by
+ * 5. Cactus's managed exit handler acknowledges interrupt arrival by
* requesting the interrupt id to the SPMC, and check if it is the
* MANAGED_EXIT_INTERRUPT_ID.
*
- * 7. Check whether the pending non-secure timer interrupt successfully got
+ * 6. Check whether the pending non-secure timer interrupt successfully got
* handled in TFTF.
*
- * 8. Send a direct message request command to resume Cactus's execution.
- * It resumes in the sleep loop and completes it. It then returns with
- * a direct message response. Check if time lapsed is greater than
- * sleeping time.
+ * 7. Send a new sleep command to Cactus SP. An error response must be sent
+ * back by the Cactus SP with CACTUS_ERROR_TEST as the error code.
+ *
+ * 8. Send a command asking the SP to resume after managed exit. SP resumes in
+ * the suspended sleep loop and completes it. It then returns with a direct
+ * message response. Check if time lapsed is greater than sleeping time.
*
*/
-test_result_t test_ffa_ns_interrupt(void)
+test_result_t test_ffa_ns_interrupt_managed_exit(void)
{
int ret;
- smc_ret_values ret_values;
+ struct ffa_value ret_values;
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
- /* Enable managed exit interrupt as FIQ in the secure side. */
- ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, MANAGED_EXIT_INTERRUPT_ID,
- true, INTERRUPT_TYPE_FIQ);
+ /* Send request to primary Cactus to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
- if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
- ERROR("Failed to enable Managed exit interrupt\n");
+ /*
+ * The managed exit interrupt occurs during this time; Cactus
+ * will respond with the interrupt ID.
+ */
+ if (cactus_get_response(ret_values) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Managed exit interrupt did not occur!\n");
return TEST_RESULT_FAIL;
}
- /* Program timer */
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a new command to the SP before resuming it after managed exit.
+ * Cactus should return an error because the current endpoint sent a
+ * new command instead of the resume request. Then resume Cactus, which
+ * completes the suspended sleep routine.
+ */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test the scenario where a non-secure interrupt triggers while a
+ * Secure Partition, which specified the action for NS interrupts as SIGNALABLE, is
+ * executing.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
+ *
+ * 3. While executing in busy loop, the non-secure timer should fire. Cactus SP
+ * should be preempted by non-secure interrupt.
+ *
+ * 4. Execution traps to SPMC running at S-EL2 as FIQ. SPMC returns control to
+ * the normal world through FFA_INTERRUPT ABI for it to handle the non-secure
+ * interrupt.
+ *
+ * 5. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 6. Resume the Cactus SP using FFA_RUN ABI for it to complete the sleep
+ * routine.
+ *
+ * 7. Ensure the Cactus SP sends the DIRECT RESPONSE message.
+ *
+ * 8. Check if time lapsed is greater than sleep time.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_signaled(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+ unsigned int core_pos = get_current_core_id();
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to secondary Cactus to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER_2, SLEEP_TIME);
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Cactus SP should be preempted by non-secure interrupt. SPMC
+ * returns control to the normal world through FFA_INTERRUPT ABI
+ * for it to handle the non-secure interrupt.
+ */
+ if (ffa_func_id(ret_values) != FFA_INTERRUPT) {
+ ERROR("Expected FFA_INTERRUPT as return status!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Ensure SPMC returns FFA_ERROR with BUSY error code when a direct
+ * request message is sent to the preempted SP.
+ */
+ ret_values = cactus_echo_send_cmd(SENDER, RECEIVER_2, ECHO_VAL1);
+
+ if ((ffa_func_id(ret_values) != FFA_ERROR) ||
+ (ffa_error_code(ret_values) != FFA_ERROR_BUSY)) {
+ ERROR("Expected FFA_ERROR(BUSY)! Got %x(%x)\n",
+ ffa_func_id(ret_values), ffa_error_code(ret_values));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Resume the Cactus SP using FFA_RUN ABI for it to complete the
+ * sleep routine and send the direct response message.
+ */
+ VERBOSE("Resuming %x\n", RECEIVER_2);
+ ret_values = ffa_run(RECEIVER_2, core_pos);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by both SPs in a call chain. A non-secure interrupt triggers
+ * while the second SP is processing a direct request message sent by the first
+ * SP. We choose SP(1) as the first SP and SP(3) as the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP (i.e., SP(1)) to forward a sleep
+ * command to the second SP (i.e., SP(3)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC will inject the managed
+ * exit signal to the second SP through vIRQ conduit and perform eret to
+ * resume execution in the second SP.
+ *
+ * 5. The second SP sends the managed exit direct response to the first SP
+ * through its interrupt handler for managed exit.
+ *
+ * 6. SPMC proactively injects managed exit signal to the first SP through vFIQ
+ * conduit and resumes it using eret.
+ *
+ * 7. The first Cactus SP sends the managed exit direct response to TFTF through
+ * its interrupt handler for managed exit.
+ *
+ * 8. TFTF checks the return value in the direct message response from the first SP
+ * and ensures it is the managed exit interrupt ID.
+ *
+ * 9. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 10. Send a dummy direct message request command to resume the first SP's execution.
+ *
+ * 11. The first SP direct message request returns with managed exit response. It
+ * then sends a dummy direct message request command to resume the second SP's
+ * execution.
+ *
+ * 12. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 13. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to TFTF.
+ *
+ * 14. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_managed_exit_chained(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
- ret = tftf_program_timer(100);
+ ret = program_timer(TIMER_DURATION);
if (ret < 0) {
ERROR("Failed to program timer (%d)\n", ret);
return TEST_RESULT_FAIL;
}
- /* Send request to primary Cactus to sleep for 200ms */
+ /*
+ * Send request to first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_3,
+ SLEEP_TIME_FWD, true);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * The managed exit interrupt occurs during this time; Cactus
+ * will respond with the interrupt ID.
+ */
+ if (cactus_get_response(ret_values) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Managed exit interrupt did not occur!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a new command to the SP before resuming it after managed exit.
+ * Cactus should return an error because the current endpoint sent a
+ * new command instead of the resume request. Then resume Cactus, which
+ * completes the suspended sleep routine.
+ */
ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by the first SP but not by the second SP in a call chain. A
+ * non-secure interrupt triggers while the second SP is processing a direct request
+ * message sent by the first SP. We choose SP(1) as the first SP and SP(2) as
+ * the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP (i.e., SP(1)) to forward a sleep
+ * command to the second SP (i.e., SP(2)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC finds the source of
+ * the interrupted direct message request and prepares the return status
+ * as FFA_INTERRUPT.
+ *
+ * 5. SPMC injects managed exit signal to the first SP through vFIQ
+ * conduit and resumes it using eret.
+ *
+ * 6. The first Cactus SP sends the managed exit direct response to TFTF through
+ * its interrupt handler for managed exit.
+ *
+ * 7. TFTF checks the return value in the direct message response from the first SP
+ * and ensures it is the managed exit interrupt ID.
+ *
+ * 8. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 9. Send a dummy direct message request command to resume the first SP's execution.
+ *
+ * 10. The first SP direct message request returns with FFA_INTERRUPT status. It
+ * then resumes the second SP's execution using FFA_RUN ABI.
+ *
+ * 11. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 12. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to TFTF.
+ *
+ * 13. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_SPx_ME_SPy_signaled(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send request to first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_2,
+ SLEEP_TIME_FWD, true);
+
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
@@ -107,42 +435,207 @@ test_result_t test_ffa_ns_interrupt(void)
return TEST_RESULT_FAIL;
}
- /* Check that the timer interrupt has been handled in NS-world (TFTF) */
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a new command to the SP before resuming it after managed exit.
+ * Cactus should return an error because the current endpoint sent a
+ * new command instead of the resume request. Then resume Cactus, which
+ * completes the suspended sleep routine.
+ */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER, SLEEP_TIME);
+
+ if (cactus_get_response(ret_values) != CACTUS_ERROR &&
+ cactus_error_code(ret_values) != CACTUS_ERROR_TEST) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_resume_after_managed_exit(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
- if (timer_irq_received == 0) {
+/*
+ * @Test_Aim@ This test exercises the following scenario: Managed exit is
+ * supported by the second SP but not by the first SP in a call chain. A non-secure
+ * interrupt triggers while the second SP is processing a direct request message
+ * sent by the first SP. We choose SP(2) as the first SP and SP(1) as the second SP.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to the first SP (i.e., SP(2)) to forward a sleep
+ * command to the second SP (i.e., SP(1)).
+ *
+ * 3. While the second SP is running the busy loop, non-secure interrupt would
+ * trigger during this time.
+ *
+ * 4. The interrupt will be trapped to SPMC as FIQ. SPMC will inject the managed
+ * exit signal to the second SP through vFIQ conduit and perform eret to
+ * resume execution in the second SP.
+ *
+ * 5. The second SP sends the managed exit direct response to the first SP
+ * through its interrupt handler for managed exit. Note that SPMC does not
+ * change the state of the non-secure interrupt at the GIC interface. SPMC
+ * resumes the first SP but execution immediately traps to the FIQ handler of
+ * the SPMC.
+ *
+ * 6. SPMC returns control to the normal world with the help of SPMD through
+ * FFA_INTERRUPT ABI for TFTF to handle the non-secure interrupt.
+ *
+ * 7. TFTF checks that the direct message request to the first SP returned
+ * with an FFA_INTERRUPT status.
+ *
+ * 8. Check whether the pending non-secure timer interrupt successfully got
+ * handled in the normal world by TFTF.
+ *
+ * 9. Resume the first Cactus SP using FFA_RUN ABI.
+ *
+ * 10. The first SP direct message request returns with managed exit response. It
+ * then sends a dummy direct message request command to resume the second SP's
+ * execution.
+ *
+ * 11. The second SP resumes in the sleep routine and sends a direct message
+ * response to the first SP.
+ *
+ * 12. The first SP checks that the time lapsed is not less than the sleep
+ * time and, if successful, sends a direct message response to TFTF.
+ *
+ * 13. TFTF ensures the direct message response did not return with an error.
+ *
+ */
+test_result_t test_ffa_SPx_signaled_SPy_ME(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+ unsigned int core_pos = get_current_core_id();
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a request to the first Cactus SP to send request to another Cactus
+ * SP to sleep.
+ */
+ VERBOSE("Forward sleep command\n");
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER_2, RECEIVER,
+ SLEEP_TIME_FWD, true);
+
+ if (check_timer_interrupt() == 0) {
ERROR("Timer interrupt hasn't actually been handled.\n");
return TEST_RESULT_FAIL;
}
/*
- * Send a dummy direct message request to relinquish CPU cycles.
- * This resumes Cactus in the sleep routine.
+ * Cactus SP should be preempted by non-secure interrupt. SPMC
+ * returns control to the normal world through FFA_INTERRUPT ABI
+ * for it to handle the non-secure interrupt.
*/
- ret_values = ffa_msg_send_direct_req64(SENDER, RECEIVER,
- 0, 0, 0, 0, 0);
+ if (ffa_func_id(ret_values) != FFA_INTERRUPT) {
+ ERROR("Expected FFA_INTERRUPT as return status!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Ensure SPMC returns FFA_ERROR with BUSY error code when a direct
+ * request message is sent to the preempted SP.
+ */
+ ret_values = cactus_echo_send_cmd(SENDER, RECEIVER_2, ECHO_VAL1);
+
+ if ((ffa_func_id(ret_values) != FFA_ERROR) ||
+ (ffa_error_code(ret_values) != FFA_ERROR_BUSY)) {
+ ERROR("Expected FFA_ERROR(BUSY)! Got %x(%x)\n",
+ ffa_func_id(ret_values), ffa_error_code(ret_values));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Resume the Cactus SP using FFA_RUN ABI for it to complete the
+ * sleep routine and send the direct response message.
+ */
+ ret_values = ffa_run(RECEIVER_2, core_pos);
if (!is_ffa_direct_response(ret_values)) {
return TEST_RESULT_FAIL;
}
- /* Make sure elapsed time not less than sleep time */
- if (cactus_get_response(ret_values) < SLEEP_TIME) {
- ERROR("Lapsed time less than requested sleep time\n");
+ if (cactus_get_response(ret_values) == CACTUS_ERROR) {
return TEST_RESULT_FAIL;
}
- /* Disable Managed exit interrupt */
- ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, MANAGED_EXIT_INTERRUPT_ID,
- false, 0);
+ return TEST_RESULT_SUCCESS;
+}
+/*
+ * @Test_Aim@ Test the scenario where a non-secure interrupt triggers while a
+ * Secure Partition, which specified the action for NS interrupts as QUEUED, is
+ * executing.
+ *
+ * 1. Register a handler for the non-secure timer interrupt. Program it to fire
+ * in a certain time.
+ *
+ * 2. Send a direct request to Cactus SP to execute in busy loop.
+ *
+ * 3. While executing in the busy loop, the non-secure timer should fire. The
+ * Cactus SP should NOT be preempted by the non-secure interrupt.
+ *
+ * 4. Cactus SP should complete the sleep routine and return with a direct
+ * response message.
+ *
+ * 5. Ensure that elapsed time in the sleep routine is not less than sleep time
+ * requested through direct message request.
+ *
+ */
+test_result_t test_ffa_ns_interrupt_queued(void)
+{
+ int ret;
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ ret = program_timer(TIMER_DURATION);
+ if (ret < 0) {
+ ERROR("Failed to program timer (%d)\n", ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to a Cactus SP to sleep for 100ms. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER_3, SLEEP_TIME);
+
+ if (check_timer_interrupt() == 0) {
+ ERROR("Timer interrupt hasn't actually been handled.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Cactus SP should not be preempted by non-secure interrupt. It
+ * should complete the sleep routine and return with a direct response
+ * message.
+ */
if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected direct message response\n");
return TEST_RESULT_FAIL;
}
- if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
- ERROR("Failed to disable Managed exit interrupt\n");
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
return TEST_RESULT_FAIL;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
index f126c57d6..af5a077f0 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
@@ -1,16 +1,26 @@
/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include "arch_features.h"
+#include "arch_helpers.h"
+#include "ffa_helpers.h"
+#include "ffa_svc.h"
+#include "stdint.h"
+#include "utils_def.h"
#include <debug.h>
+#include "ffa_helpers.h"
+#include <sync.h>
#include <cactus_test_cmds.h>
#include <ffa_endpoints.h>
+#include <host_realm_rmi.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
#include <tftf_lib.h>
-#include <spm_common.h>
#include <xlat_tables_defs.h>
#define MAILBOX_SIZE PAGE_SIZE
@@ -18,12 +28,134 @@
#define SENDER HYP_ID
#define RECEIVER SP_ID(1)
+/*
+ * A page count large enough that sharing the region must take two fragments.
+ */
+#define FRAGMENTED_SHARE_PAGE_COUNT \
+ (sizeof(struct ffa_memory_region) / \
+ sizeof(struct ffa_memory_region_constituent))
+
static const struct ffa_uuid expected_sp_uuids[] = {
{PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
};
/* Memory section to be used for memory share operations */
-static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t
+ share_page[PAGE_SIZE * FRAGMENTED_SHARE_PAGE_COUNT];
+static __aligned(PAGE_SIZE) uint8_t donate_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t consecutive_donate_page[PAGE_SIZE];
+static __aligned(PAGE_SIZE) uint8_t four_share_pages[PAGE_SIZE * 4];
+
+static bool gpc_abort_triggered;
+
+static bool check_written_words(uint32_t *ptr, uint32_t word, uint32_t wcount)
+{
+ VERBOSE("TFTF - Memory contents after SP use:\n");
+ for (unsigned int i = 0U; i < wcount; i++) {
+ VERBOSE(" %u: %x\n", i, ptr[i]);
+
+ /* Verify content of memory is as expected. */
+ if (ptr[i] != word) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool test_memory_send_expect_denied(uint32_t mem_func,
+ void *mem_ptr,
+ ffa_id_t borrower)
+{
+ struct ffa_value ret;
+ struct mailbox_buffers mb;
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)mem_ptr, 1, 0}
+ };
+ ffa_memory_handle_t handle;
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
+ GET_TFTF_MAILBOX(mb);
+
+ handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
+ constituents, constituents_count,
+ mem_func, &ret);
+
+ if (handle != FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Received a valid FF-A memory handle, and that isn't "
+ "expected.\n");
+ return false;
+ }
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool data_abort_handler(void)
+{
+ uint64_t esr_elx = IS_IN_EL2() ? read_esr_el2() : read_esr_el1();
+
+ VERBOSE("%s esr_elx %llx\n", __func__, esr_elx);
+
+ if (EC_BITS(esr_elx) == EC_DABORT_CUR_EL) {
+ /* Synchronous data abort triggered by Granule protection */
+ if ((ISS_BITS(esr_elx) & ISS_DFSC_MASK) == DFSC_GPF_DABORT) {
+ VERBOSE("%s GPF Data Abort caught to address: %llx\n",
+ __func__, (uint64_t)read_far_el2());
+ gpc_abort_triggered = true;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool get_gpc_abort_triggered(void)
+{
+ bool ret = gpc_abort_triggered;
+
+ gpc_abort_triggered = false;
+
+ return ret;
+}
+
+/**
+ * Test invocation to FF-A memory sharing interfaces that should return in an
+ * error.
+ */
+test_result_t test_share_forbidden_ranges(void)
+{
+ const uintptr_t forbidden_address[] = {
+ /* Cactus SP memory. */
+ (uintptr_t)0x7200000,
+ /* SPMC Memory. */
+ (uintptr_t)0x6000000,
+ /* NS memory defined in cactus tertiary. */
+ (uintptr_t)0x0000880080001000,
+ };
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ for (unsigned i = 0; i < 3; i++) {
+ if (!test_memory_send_expect_denied(
+ FFA_MEM_SHARE_SMC32, (void *)forbidden_address[i],
+ RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
/**
* Tests that it is possible to share memory with SWd from NWd.
@@ -43,68 +175,111 @@ static __aligned(PAGE_SIZE) uint8_t share_page[PAGE_SIZE];
* Hypervisor (sitting in EL2) would relinquish access from EL1/EL0
* FF-A endpoint at relevant moment.
*/
-static test_result_t test_memory_send_sp(uint32_t mem_func)
+static test_result_t test_memory_send_sp(uint32_t mem_func, ffa_id_t borrower,
+ struct ffa_memory_region_constituent *constituents,
+ size_t constituents_count)
{
- smc_ret_values ret;
+ struct ffa_value ret;
ffa_memory_handle_t handle;
uint32_t *ptr;
struct mailbox_buffers mb;
+ unsigned int rme_supported = get_armv9_2_feat_rme_support();
+ const bool check_gpc_fault =
+ mem_func != FFA_MEM_SHARE_SMC32 &&
+ rme_supported != 0U;
+
+ /* Arbitrarily write 5 words after using memory. */
+ const uint32_t nr_words_to_write = 5;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
/***********************************************************************
* Check if SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
GET_TFTF_MAILBOX(mb);
- struct ffa_memory_region_constituent constituents[] = {
- {(void *)share_page, 1, 0}
- };
+ /*
+ * If RME is enabled for the platform under test, check that the GPCs are
+ * working as expected; to that end, set up the exception handler.
+ */
+ if (check_gpc_fault) {
+ register_custom_sync_exception_handler(data_abort_handler);
+ }
- const uint32_t constituents_count = sizeof(constituents) /
- sizeof(struct ffa_memory_region_constituent);
+ for (size_t i = 0; i < constituents_count; i++) {
+ VERBOSE("Sharing Address: %p\n", constituents[i].address);
+ ptr = (uint32_t *)constituents[i].address;
+ for (size_t j = 0; j < nr_words_to_write; j++) {
+ ptr[j] = mem_func + 0xFFA;
+ }
+ }
handle = memory_init_and_send((struct ffa_memory_region *)mb.send,
- MAILBOX_SIZE, SENDER, RECEIVER,
+ MAILBOX_SIZE, SENDER, &receiver, 1,
constituents, constituents_count,
- mem_func);
+ mem_func, &ret);
if (handle == FFA_MEMORY_HANDLE_INVALID) {
return TEST_RESULT_FAIL;
}
- VERBOSE("TFTF - Handle: %llx\nTFTF - Address: %p\n",
- handle, constituents[0].address);
+ VERBOSE("TFTF - Handle: %llx\n", handle);
ptr = (uint32_t *)constituents[0].address;
- ret = cactus_mem_send_cmd(SENDER, RECEIVER, mem_func, handle);
-
- if (!is_ffa_direct_response(ret)) {
- return TEST_RESULT_FAIL;
- }
+ ret = cactus_mem_send_cmd(SENDER, borrower, mem_func, handle, 0,
+ nr_words_to_write, false);
- if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ if (!is_ffa_direct_response(ret) ||
+ cactus_get_response(ret) != CACTUS_SUCCESS) {
+ ffa_mem_reclaim(handle, 0);
ERROR("Failed memory send operation!\n");
return TEST_RESULT_FAIL;
}
/*
- * Print 5 words from the memory region to validate SP wrote to the
- * memory region.
+ * If there is RME support, expect an exception to trigger as soon as
+ * the security state is updated, due to a GPC fault.
*/
- VERBOSE("TFTF - Memory contents after SP use:\n");
- for (unsigned int i = 0U; i < 5U; i++)
- VERBOSE(" %u: %x\n", i, ptr[i]);
+ if (check_gpc_fault) {
+ *ptr = 0xBEEF;
+ }
- /* To make the compiler happy in case it is not a verbose build */
- if (LOG_LEVEL < LOG_LEVEL_VERBOSE)
- (void)ptr;
+ if (mem_func != FFA_MEM_DONATE_SMC32) {
- if (mem_func != FFA_MEM_DONATE_SMC32 &&
- is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
+ /* Reclaim memory entirely before checking its state. */
+ if (is_ffa_call_error(ffa_mem_reclaim(handle, 0))) {
tftf_testcase_printf("Couldn't reclaim memory\n");
return TEST_RESULT_FAIL;
+ }
+
+ for (uint32_t i = 0; i < constituents_count; i++) {
+ ptr = constituents[i].address;
+
+ /*
+ * Check that borrower used the memory as expected
+ * for FFA_MEM_SHARE test.
+ */
+ if (mem_func == FFA_MEM_SHARE_SMC32 &&
+ !check_written_words(ptr,
+ mem_func + 0xFFAU,
+ nr_words_to_write)) {
+ ERROR("Fail because of state of memory.\n");
+ return TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ if (check_gpc_fault) {
+ unregister_custom_sync_exception_handler();
+ if (!get_gpc_abort_triggered()) {
+ ERROR("No exception due to GPC for lend/donate with RME.\n");
+ return TEST_RESULT_FAIL;
+ }
}
return TEST_RESULT_SUCCESS;
@@ -112,17 +287,78 @@ static test_result_t test_memory_send_sp(uint32_t mem_func)
test_result_t test_mem_share_sp(void)
{
- return test_memory_send_sp(FFA_MEM_SHARE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ return test_memory_send_sp(FFA_MEM_SHARE_SMC32, RECEIVER, constituents,
+ constituents_count);
}
test_result_t test_mem_lend_sp(void)
{
- return test_memory_send_sp(FFA_MEM_LEND_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ return test_memory_send_sp(FFA_MEM_LEND_SMC32, RECEIVER, constituents,
+ constituents_count);
}
test_result_t test_mem_donate_sp(void)
{
- return test_memory_send_sp(FFA_MEM_DONATE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)donate_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ return test_memory_send_sp(FFA_MEM_DONATE_SMC32, RECEIVER, constituents,
+ constituents_count);
+}
+
+test_result_t test_consecutive_donate(void)
+{
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)consecutive_donate_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ test_result_t ret = test_memory_send_sp(FFA_MEM_DONATE_SMC32, SP_ID(1),
+ constituents,
+ constituents_count);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ ERROR("Failed at first attempting of sharing.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC32,
+ consecutive_donate_page,
+ SP_ID(1))) {
+ ERROR("Memory was successfully donated again from the NWd, to "
+ "the same borrower.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!test_memory_send_expect_denied(FFA_MEM_DONATE_SMC32,
+ consecutive_donate_page,
+ SP_ID(2))) {
+ ERROR("Memory was successfully donated again from the NWd, to "
+ "another borrower.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
}
/*
@@ -130,44 +366,798 @@ test_result_t test_mem_donate_sp(void)
* Cactus SP should reply to TFTF on whether the test succeeded or not.
*/
static test_result_t test_req_mem_send_sp_to_sp(uint32_t mem_func,
- ffa_vm_id_t sender_sp,
- ffa_vm_id_t receiver_sp)
+ ffa_id_t sender_sp,
+ ffa_id_t receiver_sp,
+ bool non_secure)
{
- smc_ret_values ret;
+ struct ffa_value ret;
/***********************************************************************
* Check if SPMC's ffa_version and presence of expected FF-A endpoints.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
- receiver_sp);
+ receiver_sp, non_secure);
if (!is_ffa_direct_response(ret)) {
return TEST_RESULT_FAIL;
}
if (cactus_get_response(ret) == CACTUS_ERROR) {
+ ERROR("Failed sharing memory between SPs. Error code: %d\n",
+ cactus_error_code(ret));
return TEST_RESULT_FAIL;
}
return TEST_RESULT_SUCCESS;
}
+/*
+ * Test requests a memory send operation from an SP to a VM.
+ * The test expects cactus to reply CACTUS_ERROR, providing the FF-A error code
+ * of the last memory send FF-A call that cactus performed.
+ */
+static test_result_t test_req_mem_send_sp_to_vm(uint32_t mem_func,
+ ffa_id_t sender_sp,
+ ffa_id_t receiver_vm)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Check the SPMC's FF-A version and the presence of expected FF-A endpoints.
+ *********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ ret = cactus_req_mem_send_send_cmd(HYP_ID, sender_sp, mem_func,
+ receiver_vm, false);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR &&
+ cactus_error_code(ret) == FFA_ERROR_DENIED) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ tftf_testcase_printf("Did not get the expected error, "
+ "mem send returned with %d\n",
+ cactus_get_response(ret));
+ return TEST_RESULT_FAIL;
+}
+
test_result_t test_req_mem_share_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC32, SP_ID(3),
- SP_ID(2));
+ SP_ID(2), false);
+}
+
+test_result_t test_req_ns_mem_share_sp_to_sp(void)
+{
+ /*
+ * Skip the test when RME is enabled (for test setup reasons).
+ * For RME tests, the model specifies a 48-bit physical address size
+ * at the PE, but does not allocate RAM or increase the PA size at
+ * the interconnect level.
+ */
+ if (get_armv9_2_feat_rme_support() != 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* This test requires 48b physical address size capability. */
+ SKIP_TEST_IF_PA_SIZE_LESS_THAN(48);
+
+ return test_req_mem_send_sp_to_sp(FFA_MEM_SHARE_SMC32, SP_ID(3),
+ SP_ID(2), true);
}
test_result_t test_req_mem_lend_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_LEND_SMC32, SP_ID(3),
- SP_ID(2));
+ SP_ID(2), false);
}
test_result_t test_req_mem_donate_sp_to_sp(void)
{
return test_req_mem_send_sp_to_sp(FFA_MEM_DONATE_SMC32, SP_ID(1),
- SP_ID(3));
+ SP_ID(3), false);
+}
+
+test_result_t test_req_mem_share_sp_to_vm(void)
+{
+ return test_req_mem_send_sp_to_vm(FFA_MEM_SHARE_SMC32, SP_ID(1),
+ HYP_ID);
+}
+
+test_result_t test_req_mem_lend_sp_to_vm(void)
+{
+ return test_req_mem_send_sp_to_vm(FFA_MEM_LEND_SMC32, SP_ID(2),
+ HYP_ID);
+}
+
+test_result_t test_mem_share_to_sp_clear_memory(void)
+{
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ struct mailbox_buffers mb;
+ uint32_t remaining_constituent_count;
+ uint32_t total_length;
+ uint32_t fragment_length;
+ ffa_memory_handle_t handle;
+ struct ffa_value ret;
+ /* Arbitrarily write 10 words after using shared memory. */
+ const uint32_t nr_words_to_write = 10U;
+
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(
+ RECEIVER, FFA_MEM_LEND_SMC32);
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, constituents_count, 0,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
+ &total_length, &fragment_length);
+
+ if (remaining_constituent_count != 0) {
+ ERROR("Transaction descriptor initialization failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ handle = memory_send(mb.send, FFA_MEM_LEND_SMC32, constituents,
+ constituents_count, remaining_constituent_count,
+ fragment_length, total_length, &ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Memory Share failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Memory has been shared!\n");
+
+ ret = cactus_mem_send_cmd(SENDER, RECEIVER, FFA_MEM_LEND_SMC32, handle,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ nr_words_to_write, false);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ ERROR("Failed memory send operation!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_mem_reclaim(handle, 0);
+
+ if (is_ffa_call_error(ret)) {
+ ERROR("Memory reclaim failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Print `region` if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ */
+static void print_memory_region(struct ffa_memory_region *region)
+{
+ VERBOSE("region.sender = %d\n", region->sender);
+ VERBOSE("region.attributes.shareability = %d\n",
+ region->attributes.shareability);
+ VERBOSE("region.attributes.cacheability = %d\n",
+ region->attributes.cacheability);
+ VERBOSE("region.attributes.type = %d\n", region->attributes.type);
+ VERBOSE("region.attributes.security = %d\n",
+ region->attributes.security);
+ VERBOSE("region.flags = %d\n", region->flags);
+ VERBOSE("region.handle = %lld\n", region->handle);
+ VERBOSE("region.tag = %lld\n", region->tag);
+ VERBOSE("region.memory_access_desc_size = %d\n",
+ region->memory_access_desc_size);
+ VERBOSE("region.receiver_count = %d\n", region->receiver_count);
+ VERBOSE("region.receivers_offset = %d\n", region->receivers_offset);
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_retrieve_response(const struct ffa_memory_region *region1,
+ const struct ffa_memory_region *region2)
+{
+ if (region1->sender != region2->sender) {
+ ERROR("region1.sender=%d, expected %d\n", region1->sender,
+ region2->sender);
+ return false;
+ }
+ if (region1->attributes.shareability != region2->attributes.shareability) {
+ ERROR("region1.attributes.shareability=%d, expected %d\n",
+ region1->attributes.shareability,
+ region2->attributes.shareability);
+ return false;
+ }
+ if (region1->attributes.cacheability != region2->attributes.cacheability) {
+ ERROR("region1.attributes.cacheability=%d, expected %d\n",
+ region1->attributes.cacheability,
+ region2->attributes.cacheability);
+ return false;
+ }
+ if (region1->attributes.type != region2->attributes.type) {
+ ERROR("region1.attributes.type=%d, expected %d\n",
+ region1->attributes.type, region2->attributes.type);
+ return false;
+ }
+ if (region1->attributes.security != region2->attributes.security) {
+ ERROR("region1.attributes.security=%d, expected %d\n",
+ region1->attributes.security, region2->attributes.security);
+ return false;
+ }
+ if (region1->flags != region2->flags) {
+ ERROR("region1->flags=%d, expected %d\n", region1->flags,
+ region2->flags);
+ return false;
+ }
+ if (region1->handle != region2->handle) {
+ ERROR("region1.handle=%lld, expected %lld\n", region1->handle,
+ region2->handle);
+ return false;
+ }
+ if (region1->tag != region2->tag) {
+ ERROR("region1.tag=%lld, expected %lld\n", region1->tag, region2->tag);
+ return false;
+ }
+ if (region1->memory_access_desc_size != region2->memory_access_desc_size) {
+ ERROR("region1.memory_access_desc_size=%d, expected %d\n",
+ region1->memory_access_desc_size,
+ region2->memory_access_desc_size);
+ return false;
+ }
+ if (region1->receiver_count != region2->receiver_count) {
+ ERROR("region1.receiver_count=%d, expected %d\n",
+ region1->receiver_count, region2->receiver_count);
+ return false;
+ }
+ if (region1->receivers_offset != region2->receivers_offset) {
+ ERROR("region1.receivers_offset=%d, expected %d\n",
+ region1->receivers_offset, region2->receivers_offset);
+ return false;
+ }
+ for (uint32_t i = 0; i < 3; i++) {
+ if (region1->reserved[i] != 0) {
+ ERROR("region.reserved[%d]=%d, expected 0\n", i,
+ region1->reserved[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool
+verify_constituent(struct ffa_memory_region_constituent *constituent,
+ void *address, uint32_t page_count)
+{
+ if (constituent->address != address) {
+ ERROR("constituent.address=%p, expected %p\n",
+ constituent->address, address);
+ return false;
+ }
+ if (constituent->page_count != page_count) {
+ ERROR("constituent.page_count=%d, expected %d\n",
+ constituent->page_count, page_count);
+ return false;
+ }
+ if (constituent->reserved != 0) {
+ ERROR("constituent.reserved=%d, expected 0\n",
+ constituent->reserved);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_composite(struct ffa_composite_memory_region *composite,
+ struct ffa_memory_region_constituent *constituent,
+ uint32_t page_count, uint32_t constituent_count)
+{
+ if (composite->page_count != page_count) {
+ ERROR("composite.page_count=%d, expected %d\n",
+ composite->page_count, page_count);
+ return false;
+ }
+ if (composite->constituent_count != constituent_count) {
+ ERROR("composite.constituent_count=%d, expected %d\n",
+ composite->constituent_count, constituent_count);
+ return false;
+ }
+ if (composite->reserved_0 != 0) {
+ ERROR("composite.reserved_0=%llu, expected 0\n",
+ composite->reserved_0);
+ return false;
+ }
+ for (uint32_t j = 0; j < composite->constituent_count; j++) {
+ if (!verify_constituent(&constituent[j],
+ share_page + j * PAGE_SIZE, 1)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool verify_receivers_impdef(struct ffa_memory_access_impdef impdef1,
+ struct ffa_memory_access_impdef impdef2)
+{
+ if (impdef1.val[0] != impdef2.val[0] ||
+ impdef1.val[1] != impdef2.val[1]) {
+ ERROR("ipmdef1.val[0]=%llu expected=%llu"
+ " ipmdef1.val[1]=%llu expected=%llu\n",
+ impdef1.val[0], impdef2.val[0],
+ impdef1.val[1], impdef2.val[1]);
+ return false;
+ }
+
+ return true;
+}
+
+static bool verify_permissions(
+ ffa_memory_access_permissions_t permissions1,
+ ffa_memory_access_permissions_t permissions2)
+{
+ uint8_t access1;
+ uint8_t access2;
+
+ access1 = permissions1.data_access;
+ access2 = permissions2.data_access;
+
+ if (access1 != access2) {
+ ERROR("permissions1.data_access=%u expected=%u\n",
+ access1, access2);
+ return false;
+ }
+
+ access1 = permissions1.instruction_access;
+ access2 = permissions2.instruction_access;
+
+ if (access1 != access2) {
+ ERROR("permissions1.instruction_access=%u expected=%u\n",
+ access1, access2);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Used by hypervisor retrieve request test: validate descriptors provided by
+ * SPMC.
+ */
+static bool verify_receivers(struct ffa_memory_access *receivers1,
+ struct ffa_memory_access *receivers2,
+ uint32_t receivers_count)
+{
+ for (uint32_t i = 0; i < receivers_count; i++) {
+ if (receivers1[i].receiver_permissions.receiver !=
+ receivers2[i].receiver_permissions.receiver) {
+ ERROR("receivers1[%u].receiver_permissions.receiver=%x"
+ " expected=%x\n", i,
+ receivers1[i].receiver_permissions.receiver,
+ receivers2[i].receiver_permissions.receiver);
+ return false;
+ }
+
+ if (receivers1[i].receiver_permissions.flags !=
+ receivers2[i].receiver_permissions.flags) {
+ ERROR("receivers1[%u].receiver_permissions.flags=%u"
+ " expected=%u\n", i,
+ receivers1[i].receiver_permissions.flags,
+ receivers2[i].receiver_permissions.flags);
+ return false;
+ }
+
+ if (!verify_permissions(
+ receivers1[i].receiver_permissions.permissions,
+ receivers2[i].receiver_permissions.permissions)) {
+ return false;
+ }
+
+ if (receivers1[i].composite_memory_region_offset !=
+ receivers2[i].composite_memory_region_offset) {
+ ERROR("receivers1[%u].composite_memory_region_offset=%u"
+ " expected %u\n",
+ i, receivers1[i].composite_memory_region_offset,
+ receivers2[i].composite_memory_region_offset);
+ return false;
+ }
+
+ if (!verify_receivers_impdef(receivers1[i].impdef,
+ receivers2[i].impdef)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Helper for performing a hypervisor retrieve request test.
+ */
+static test_result_t hypervisor_retrieve_request_test_helper(
+ uint32_t mem_func, bool multiple_receivers, bool fragmented)
+{
+ static struct ffa_memory_region_constituent
+ sent_constituents[FRAGMENTED_SHARE_PAGE_COUNT];
+ __aligned(PAGE_SIZE) static uint8_t page[PAGE_SIZE * 2] = {0};
+ struct ffa_memory_region *hypervisor_retrieve_response =
+ (struct ffa_memory_region *)page;
+ struct ffa_memory_region expected_response;
+ struct mailbox_buffers mb;
+ ffa_memory_handle_t handle;
+ struct ffa_value ret;
+ struct ffa_composite_memory_region *composite;
+ struct ffa_memory_access *retrvd_receivers;
+ uint32_t expected_flags = 0;
+
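+ /*
+ * Note on the expected memory type below (based on the FF-A memory
+ * management rules, stated here as an assumption): for a share, and for
+ * a lend with multiple borrowers, the sender specifies the attributes
+ * (normal memory here); for a lend or donate to a single borrower the
+ * sender must leave the type unspecified, which is what the retrieve
+ * response is then expected to report.
+ */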
+ ffa_memory_attributes_t expected_attrs = {
+ .cacheability = FFA_MEMORY_CACHE_WRITE_BACK,
+ .shareability = FFA_MEMORY_INNER_SHAREABLE,
+ .security = FFA_MEMORY_SECURITY_NON_SECURE,
+ .type = (!multiple_receivers && mem_func != FFA_MEM_SHARE_SMC32)
+ ? FFA_MEMORY_NOT_SPECIFIED_MEM
+ : FFA_MEMORY_NORMAL_MEM,
+ };
+
+ struct ffa_memory_access receivers[2] = {
+ ffa_memory_access_init_permissions_from_mem_func(SP_ID(1),
+ mem_func),
+ ffa_memory_access_init_permissions_from_mem_func(SP_ID(2),
+ mem_func),
+ };
+
+ /*
+ * Only pass 1 receiver to `memory_init_and_send` if we are not testing
+ * the multiple-receivers functionality of the hypervisor retrieve
+ * request.
+ */
+ uint32_t receiver_count =
+ multiple_receivers ? ARRAY_SIZE(receivers) : 1;
+
+ uint32_t sent_constituents_count =
+ fragmented ? ARRAY_SIZE(sent_constituents) : 1;
+
+ /* Prepare the composite offset for the comparison. */
+ for (uint32_t i = 0; i < receiver_count; i++) {
+ receivers[i].composite_memory_region_offset =
+ sizeof(struct ffa_memory_region) +
+ receiver_count *
+ sizeof(struct ffa_memory_access);
+ }
+
+ /*
+ * Add a page per constituent, so that we exhaust the size of a single
+ * fragment (for testing). In a real world scenario, the whole region
+ * could be described in a single constituent.
+ */
+ for (uint32_t i = 0; i < sent_constituents_count; i++) {
+ sent_constituents[i].address = share_page + i * PAGE_SIZE;
+ sent_constituents[i].page_count = 1;
+ sent_constituents[i].reserved = 0;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+ GET_TFTF_MAILBOX(mb);
+
+ switch (mem_func) {
+ case FFA_MEM_SHARE_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE;
+ break;
+ case FFA_MEM_LEND_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND;
+ break;
+ case FFA_MEM_DONATE_SMC32:
+ expected_flags = FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE;
+ break;
+ default:
+ ERROR("Invalid mem_func: %d\n", mem_func);
+ panic();
+ }
+
+ handle = memory_init_and_send(mb.send, MAILBOX_SIZE, SENDER, receivers,
+ receiver_count, sent_constituents,
+ sent_constituents_count, mem_func, &ret);
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("Memory share failed: %d\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send the hypervisor retrieve request according to section 17.4.3 of
+ * the FF-A v1.2-REL0 specification.
+ */
+ if (!hypervisor_retrieve_request(&mb, handle, page, sizeof(page))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ print_memory_region(hypervisor_retrieve_response);
+
+ /*
+ * Verify that the received `FFA_MEM_RETRIEVE_RESP` matches the
+ * transaction description sent above.
+ */
+ expected_response = (struct ffa_memory_region) {
+ .sender = SENDER,
+ .attributes = expected_attrs,
+ .flags = expected_flags,
+ .handle = handle,
+ .tag = 0,
+ .memory_access_desc_size = sizeof(struct ffa_memory_access),
+ .receiver_count = receiver_count,
+ .receivers_offset =
+ offsetof(struct ffa_memory_region, receivers),
+ };
+
+ if (!verify_retrieve_response(hypervisor_retrieve_response,
+ &expected_response)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ retrvd_receivers =
+ ffa_memory_region_get_receiver(hypervisor_retrieve_response, 0);
+
+ if (!verify_receivers(retrvd_receivers,
+ receivers, receiver_count)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ composite = ffa_memory_region_get_composite(
+ hypervisor_retrieve_response, 0);
+
+ if (!verify_composite(composite, composite->constituents,
+ sent_constituents_count, sent_constituents_count)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Reclaim for the SPMC to deallocate any data related to the handle.
+ */
+ ret = ffa_mem_reclaim(handle, 0);
+ if (is_ffa_call_error(ret)) {
+ ERROR("Memory reclaim failed: %d\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+test_result_t test_hypervisor_share_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_donate_retrieve(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_DONATE_SMC32, false, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_multiple_receivers(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_lend_retrieve_multiple_receivers(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, true, false);
+}
+
+test_result_t test_hypervisor_share_retrieve_fragmented(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_SHARE_SMC32, false, true);
+}
+
+test_result_t test_hypervisor_lend_retrieve_fragmented(void)
+{
+ return hypervisor_retrieve_request_test_helper(FFA_MEM_LEND_SMC32, false, true);
+}
+
+/**
+ * Test helper that performs a memory sharing operation and alters the PAS
+ * of the memory, to validate that the SPM intercepts the operation in case
+ * the PAS is not coherent with its use. Relevant for the functioning of
+ * FFA_MEM_LEND and FFA_MEM_DONATE from the NWd to an SP.
+ *
+ * In case the memory is not in the NS state, the SPMC should intercept the
+ * memory management call with an appropriate FFA_ERROR.
+ */
+static test_result_t test_ffa_mem_send_realm_expect_fail(
+ uint32_t mem_func, ffa_id_t borrower,
+ struct ffa_memory_region_constituent *constituents,
+ size_t constituents_count, uint64_t delegate_addr)
+{
+ struct ffa_value ret;
+ uint32_t remaining_constituent_count;
+ uint32_t total_length;
+ uint32_t fragment_length;
+ struct mailbox_buffers mb;
+ u_register_t ret_rmm;
+ test_result_t result = TEST_RESULT_FAIL;
+ struct ffa_memory_access receiver =
+ ffa_memory_access_init_permissions_from_mem_func(borrower,
+ mem_func);
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /***********************************************************************
+ * Check the SPMC's FF-A version and that the expected FF-A endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /*
+ * Delegate page to a realm. This should make memory sharing operation
+ * fail.
+ */
+ ret_rmm = host_rmi_granule_delegate((u_register_t)delegate_addr);
+
+ if (ret_rmm != 0UL) {
+ INFO("Delegate operation returns 0x%lx for address %llx\n",
+ ret_rmm, delegate_addr);
+ unregister_custom_sync_exception_handler();
+ return TEST_RESULT_FAIL;
+ }
+
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, constituents_count, 0,
+ FFA_MEMORY_REGION_FLAG_CLEAR,
+ FFA_MEMORY_NOT_SPECIFIED_MEM, 0, 0,
+ &total_length, &fragment_length);
+
+ if (remaining_constituent_count != 0) {
+ goto out;
+ }
+
+ switch (mem_func) {
+ case FFA_MEM_LEND_SMC32:
+ ret = ffa_mem_lend(total_length, fragment_length);
+ break;
+ case FFA_MEM_DONATE_SMC32:
+ ret = ffa_mem_donate(total_length, fragment_length);
+ break;
+ default:
+ ERROR("Not expected for func name: %x\n", mem_func);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ goto out;
+ }
+
+ /* Undelegate to reestablish the same security state for PAS. */
+ ret_rmm = host_rmi_granule_undelegate((u_register_t)delegate_addr);
+
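+ /*
+ * With the page back in the NS PAS, the writes below from the NWd are
+ * not expected to trigger a granule protection fault; this is verified
+ * via get_gpc_abort_triggered() further down.
+ */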
+ for (uint32_t i = 0; i < constituents_count; i++) {
+ uint32_t *ptr = (uint32_t *)constituents[i].address;
+
+ *ptr = 0xFFA;
+ }
+
+ if (get_gpc_abort_triggered()) {
+ ERROR("Exception due to GPC for lend/donate with RME. Not"
+ " expected for this case.\n");
+ result = TEST_RESULT_FAIL;
+ } else {
+ result = TEST_RESULT_SUCCESS;
+ }
+out:
+ unregister_custom_sync_exception_handler();
+
+ if (ret_rmm != 0UL) {
+ INFO("Undelegate operation returns 0x%lx for address %llx\n",
+ ret_rmm, (uint64_t)delegate_addr);
+ return TEST_RESULT_FAIL;
+ }
+
+ return result;
+}
+
+/**
+ * Memory to be shared between partitions is described in a composite, with
+ * various constituents. In an RME system, the memory must be in the NS PAS
+ * for operations from the NWd to an SP. If the PAS does not meet this
+ * expectation, memory lend/donate should fail and all constituents must
+ * remain in the NS PAS.
+ *
+ * This test validates that the operation fails if one page in the middle of
+ * one of the constituents is not in the NS PAS.
+ */
+test_result_t test_ffa_mem_send_sp_realm_memory(void)
+{
+ test_result_t ret;
+ uint32_t mem_func[] = {FFA_MEM_LEND_SMC32, FFA_MEM_DONATE_SMC32};
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+
+ for (unsigned int j = 0; j < ARRAY_SIZE(mem_func); j++) {
+ for (unsigned int i = 0; i < 4; i++) {
+ /* Address to be delegated to Realm PAS. */
+ uint64_t realm_addr =
+ (uint64_t)&four_share_pages[i * PAGE_SIZE];
+
+ INFO("%s memory with realm addr: %llx\n",
+ mem_func[j] == FFA_MEM_LEND_SMC32
+ ? "Lend"
+ : "Donate",
+ realm_addr);
+
+ ret = test_ffa_mem_send_realm_expect_fail(
+ mem_func[j], SP_ID(1), constituents,
+ constituents_count, realm_addr);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Memory to be shared between partitions is described in a composite, with
+ * various constituents. In an RME system, the memory must be in the NS PAS
+ * for operations from the NWd to an SP. If the PAS does not meet this
+ * expectation, memory lend/donate should fail and all constituents must
+ * remain in the NS PAS.
+ *
+ * This test validates that memory lend/donate fails in case one of the
+ * constituents in the composite is not in the NS PAS.
+ */
+test_result_t test_ffa_mem_lend_sp_realm_memory_separate_constituent(void)
+{
+ test_result_t ret;
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+ const uint32_t constituents_count = sizeof(constituents) /
+ sizeof(struct ffa_memory_region_constituent);
+ /* Address to be delegated to Realm PAS. */
+ uint64_t realm_addr = (uint64_t)&share_page[0];
+
+ INFO("Sharing memory with realm addr: %llx\n", realm_addr);
+
+ ret = test_ffa_mem_send_realm_expect_fail(
+ FFA_MEM_LEND_SMC32, SP_ID(1), constituents,
+ constituents_count, realm_addr);
+
+ return ret;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
new file mode 100644
index 000000000..9ca337a39
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_notifications.c
@@ -0,0 +1,1564 @@
+/*
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+#include <irq.h>
+#include <smccc.h>
+
+#include <arch_helpers.h>
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_svc.h>
+#include <platform.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+
+/**
+ * Variables used to test the per-vCPU notifications.
+ * The test follows the same logic regardless of the type of sender and
+ * receiver endpoint (VM or secure partition).
+ * Global variables are used because they need to be accessed in the CPU ON
+ * handler function 'request_notification_get_per_vcpu_on_handler'.
+ * In each specific test function, 'per_vcpu_receiver' and 'per_vcpu_sender'
+ * are changed so the logic works for:
+ * - NWd to SP;
+ * - SP to NWd;
+ * - SP to SP.
+ */
+static ffa_id_t per_vcpu_receiver;
+static ffa_id_t per_vcpu_sender;
+uint32_t per_vcpu_flags_get;
+static event_t per_vcpu_finished[PLATFORM_CORE_COUNT];
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}
+};
+
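+/*
+ * The notification IDs below are spread across both halves of the 64-bit
+ * notifications bitmap (including the boundary bits 0 and 63), presumably so
+ * that both 32-bit halves of the bitmap get exercised.
+ */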
+static ffa_notification_bitmap_t g_notifications = FFA_NOTIFICATION(0) |
+ FFA_NOTIFICATION(1) |
+ FFA_NOTIFICATION(30) |
+ FFA_NOTIFICATION(50) |
+ FFA_NOTIFICATION(63);
+
+/**
+ * Use FFA_FEATURES to retrieve the ID of:
+ * - Schedule Receiver Interrupt
+ * - Notification Pending Interrupt
+ * - Managed Exit Interrupt
+ * Validate the call works as expected, and that the returned IDs match the
+ * interrupt IDs used in the remainder of the tests.
+ */
+test_result_t test_notifications_retrieve_int_ids(void)
+{
+ struct ffa_value ret;
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ /* Check if SPMC is OP-TEE at S-EL1 */
+ if (check_spmc_execution_level()) {
+ /* FFA_FEATURES is not yet supported in OP-TEE */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ ret = ffa_features(FFA_FEATURE_NPI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != NOTIFICATION_PENDING_INTERRUPT_INTID) {
+ ERROR("Failed to retrieved NPI (exp: %u, got: %u)\n",
+ NOTIFICATION_PENDING_INTERRUPT_INTID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_features(FFA_FEATURE_SRI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != FFA_SCHEDULE_RECEIVER_INTERRUPT_ID) {
+ ERROR("Failed to retrieved SRI (exp: %u, got: %u)\n",
+ FFA_SCHEDULE_RECEIVER_INTERRUPT_ID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_features(FFA_FEATURE_MEI);
+ if (is_ffa_call_error(ret) ||
+ ffa_feature_intid(ret) != MANAGED_EXIT_INTERRUPT_ID) {
+ ERROR("Failed to retrieved MEI (exp: %u, got: %u)\n",
+ MANAGED_EXIT_INTERRUPT_ID,
+ ffa_feature_intid(ret));
+
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper to create bitmap for NWd VMs.
+ */
+static bool notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
+{
+ VERBOSE("Creating bitmap for VM %x; cpu count: %u.\n",
+ vm_id, vcpu_count);
+ struct ffa_value ret = ffa_notification_bitmap_create(vm_id,
+ vcpu_count);
+
+ return !is_ffa_call_error(ret);
+}
+
+/**
+ * Helper to destroy bitmap for NWd VMs.
+ */
+static bool notifications_bitmap_destroy(ffa_id_t vm_id)
+{
+ VERBOSE("Destroying bitmap of VM %x.\n", vm_id);
+ struct ffa_value ret = ffa_notification_bitmap_destroy(vm_id);
+
+ return !is_ffa_call_error(ret);
+}
+
+/**
+ * Test notifications bitmap create and destroy interfaces.
+ */
+test_result_t test_ffa_notifications_bitmap_create_destroy(void)
+{
+ const ffa_id_t vm_id = VM_ID(1);
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!notifications_bitmap_create(vm_id, PLATFORM_CORE_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test notifications bitmap destroy in a case the bitmap hasn't been created.
+ */
+test_result_t test_ffa_notifications_destroy_not_created(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ struct ffa_value ret = ffa_notification_bitmap_destroy(VM_ID(1));
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test attempt to create notifications bitmap for NWd VM if it had been
+ * already created.
+ */
+test_result_t test_ffa_notifications_create_after_create(void)
+{
+ struct ffa_value ret;
+ const ffa_id_t vm_id = VM_ID(2);
+
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /* First successfully create a notifications bitmap */
+ if (!notifications_bitmap_create(vm_id, 1)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Attempt to do the same to the same VM. */
+ ret = ffa_notification_bitmap_create(vm_id, 1);
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Destroy to not affect other tests */
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper function to test the FFA_NOTIFICATION_BIND interface.
+ * Receives all arguments to use 'cactus_notification_bind_send_cmd', and the
+ * expected response for the test command.
+ *
+ * Returns:
+ * - 'true' if response was obtained and it was as expected;
+ * - 'false' if there was an error with use of FFA_MSG_SEND_DIRECT_REQ, or
+ * the obtained response was not as expected.
+ */
+static bool request_notification_bind(
+ ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender,
+ ffa_notification_bitmap_t notifications, uint32_t flags,
+ uint32_t expected_resp, uint32_t error_code)
+{
+ struct ffa_value ret;
+
+ VERBOSE("TFTF requesting SP to bind notifications!\n");
+
+ ret = cactus_notification_bind_send_cmd(HYP_ID, cmd_dest, receiver,
+ sender, notifications, flags);
+
+ if (!is_expected_cactus_response(ret, expected_resp, error_code)) {
+ ERROR("Failed notifications bind. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Helper function to test the FFA_NOTIFICATION_UNBIND interface.
+ * Receives all arguments to use 'cactus_notification_unbind_send_cmd', and the
+ * expected response for the test command.
+ *
+ * Returns:
+ * - 'true' if response was obtained and it was as expected;
+ * - 'false' if there was an error with use of FFA_MSG_SEND_DIRECT_REQ, or
+ * the obtained response was not as expected.
+ */
+static bool request_notification_unbind(
+ ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender,
+ ffa_notification_bitmap_t notifications, uint32_t expected_resp,
+ uint32_t error_code)
+{
+ struct ffa_value ret;
+
+ VERBOSE("TFTF requesting SP to unbind notifications!\n");
+
+ ret = cactus_notification_unbind_send_cmd(HYP_ID, cmd_dest, receiver,
+ sender, notifications);
+
+ if (!is_expected_cactus_response(ret, expected_resp, error_code)) {
+ ERROR("Failed notifications unbind. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Test calls from SPs to the bind and unbind interfaces, expecting success
+ * returns.
+ * This test issues a request via direct messaging to the SP, which executes
+ * the test and responds with the result of the call.
+ */
+test_result_t test_ffa_notifications_sp_bind_unbind(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /** First bind... */
+ if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+ g_notifications, 0, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!request_notification_bind(SP_ID(1), SP_ID(1), 1,
+ g_notifications, 0, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /** ... then unbind using the same arguments. */
+ if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+ g_notifications, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!request_notification_unbind(SP_ID(1), SP_ID(1), 1,
+ g_notifications, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test successful attempt of doing bind and unbind of the same set of
+ * notifications.
+ */
+test_result_t test_ffa_notifications_vm_bind_unbind(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t vm_id = VM_ID(1);
+ struct ffa_value ret;
+
+ if (!notifications_bitmap_create(vm_id, 1)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_notification_bind(SP_ID(2), vm_id, 0, g_notifications);
+
+ if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_notification_unbind(SP_ID(2), vm_id, g_notifications);
+
+ if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test expected failure of using a NS FF-A ID for the sender.
+ */
+test_result_t test_ffa_notifications_vm_bind_vm(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t vm_id = VM_ID(1);
+ const ffa_id_t sender_id = VM_ID(2);
+ struct ffa_value ret;
+
+ if (!notifications_bitmap_create(vm_id, 1)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_notification_bind(sender_id, vm_id, 0, g_notifications);
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notifications_bitmap_destroy(vm_id)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test failure of both bind and unbind in case at least one notification is
+ * already bound to another FF-A endpoint.
+ * Expect error code FFA_ERROR_DENIED.
+ */
+test_result_t test_ffa_notifications_already_bound(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /** Bind first to test */
+ if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+ g_notifications, 0, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /** Attempt to bind notifications bound in above request. */
+ if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(3),
+ g_notifications, 0, CACTUS_ERROR,
+ FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /** Attempt to unbind notifications bound in initial request. */
+ if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(3),
+ g_notifications, CACTUS_ERROR,
+ FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /** Reset the SP's notifications state. */
+ if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+ g_notifications, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Try to bind/unbind notifications spoofing the identity of the receiver.
+ * Commands will be sent to SP_ID(1), which will use SP_ID(3) as the receiver.
+ * Expect error code FFA_ERROR_INVALID_PARAMETER.
+ */
+test_result_t test_ffa_notifications_bind_unbind_spoofing(void)
+{
+ ffa_notification_bitmap_t notifications = FFA_NOTIFICATION(8);
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ if (!request_notification_bind(SP_ID(1), SP_ID(3), SP_ID(2),
+ notifications, 0, CACTUS_ERROR,
+ FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!request_notification_unbind(SP_ID(1), SP_ID(3), SP_ID(2),
+ notifications, CACTUS_ERROR,
+ FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Call FFA_NOTIFICATION_BIND with notifications bitmap zeroed.
+ * Expecting error code FFA_ERROR_INVALID_PARAMETER.
+ */
+test_result_t test_ffa_notifications_bind_unbind_zeroed(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ if (!request_notification_bind(SP_ID(1), SP_ID(1), SP_ID(2),
+ 0, 0, CACTUS_ERROR,
+ FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!request_notification_unbind(SP_ID(1), SP_ID(1), SP_ID(2),
+ 0, CACTUS_ERROR,
+ FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Helper function to test the FFA_NOTIFICATION_GET interface.
+ * Receives all arguments to use 'cactus_notification_get_send_cmd', and returns
+ * the received response. Depending on the testing scenario, this allows
+ * validating whether the returned bitmaps are as expected.
+ *
+ * Returns:
+ * - 'true' if response was obtained.
+ * - 'false' if there was an error sending the request.
+ */
+static bool request_notification_get(
+ ffa_id_t cmd_dest, ffa_id_t receiver, uint32_t vcpu_id, uint32_t flags,
+ bool check_npi_handled, struct ffa_value *response)
+{
+ VERBOSE("TFTF requesting SP to get notifications!\n");
+
+ *response = cactus_notification_get_send_cmd(HYP_ID, cmd_dest,
+ receiver, vcpu_id,
+ flags, check_npi_handled);
+
+ return is_ffa_direct_response(*response);
+}
+
+static bool request_notification_set(
+ ffa_id_t cmd_dest, ffa_id_t receiver, ffa_id_t sender, uint32_t flags,
+ ffa_notification_bitmap_t notifications, ffa_id_t echo_dest,
+ uint32_t exp_resp, int32_t exp_error)
+{
+ struct ffa_value ret;
+
+ VERBOSE("TFTF requesting SP %x (as %x) to set notifications to %x\n",
+ cmd_dest, sender, receiver);
+
+ ret = cactus_notifications_set_send_cmd(HYP_ID, cmd_dest, receiver,
+ sender, flags, notifications,
+ echo_dest);
+
+ if (!is_expected_cactus_response(ret, exp_resp, exp_error)) {
+ ERROR("Failed notifications set. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Helper to set a notification. If the sender is a VM, the function calls
+ * FFA_NOTIFICATION_SET directly; if it is an SP, it requests the SP to set the
+ * notifications. In both cases a successful outcome is expected.
+ */
+static bool notification_set(ffa_id_t receiver, ffa_id_t sender,
+ uint32_t flags,
+ ffa_notification_bitmap_t notifications)
+{
+ struct ffa_value ret;
+
+ /* Sender sets notifications to receiver. */
+ if (!IS_SP_ID(sender)) {
+ VERBOSE("VM %x Setting notifications %llx to receiver %x\n",
+ sender, notifications, receiver);
+ ret = ffa_notification_set(sender, receiver, flags, notifications);
+
+ if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+ ERROR("Failed notifications set. receiver: %x; sender: %x\n",
+ receiver, sender);
+ return false;
+ }
+ return true;
+ }
+
+ return request_notification_set(sender, receiver, sender, flags,
+ notifications, 0, CACTUS_SUCCESS, 0);
+}
+
+/**
+ * Check that SP's response to CACTUS_NOTIFICATION_GET_CMD is as expected.
+ */
+static bool is_notifications_get_as_expected(
+ struct ffa_value *ret, uint64_t exp_from_sp, uint64_t exp_from_vm,
+ ffa_id_t receiver)
+{
+ uint64_t from_sp;
+ uint64_t from_vm;
+ bool success_ret;
+
+ /**
+ * If receiver ID is SP, this is to evaluate the response to test
+ * command 'CACTUS_NOTIFICATION_GET_CMD'.
+ */
+ if (IS_SP_ID(receiver)) {
+ success_ret = (cactus_get_response(*ret) == CACTUS_SUCCESS);
+ from_sp = cactus_notifications_get_from_sp(*ret);
+ from_vm = cactus_notifications_get_from_vm(*ret);
+ } else {
+ /**
+ * Else, this is to evaluate the return of FF-A call:
+ * ffa_notification_get.
+ */
+ success_ret = (ffa_func_id(*ret) == FFA_SUCCESS_SMC32);
+ from_sp = ffa_notifications_get_from_sp(*ret);
+ from_vm = ffa_notifications_get_from_vm(*ret);
+ }
+
+ if (success_ret != true ||
+ exp_from_sp != from_sp ||
+ exp_from_vm != from_vm) {
+ VERBOSE("Notifications not as expected:\n"
+ " from sp: %llx exp: %llx\n"
+ " from vm: %llx exp: %llx\n",
+ from_sp, exp_from_sp, from_vm, exp_from_vm);
+ return false;
+ }
+
+ return true;
+}
+
+static bool is_notifications_info_get_as_expected(
+ struct ffa_value *ret, uint16_t *ids, uint32_t *lists_sizes,
+ const uint32_t max_ids_count, uint32_t lists_count, bool more_pending)
+{
+ if (lists_count != ffa_notifications_info_get_lists_count(*ret) ||
+ more_pending != ffa_notifications_info_get_more_pending(*ret)) {
+ ERROR("Notification info get not as expected.\n"
+ " Lists counts: %u; more pending %u\n",
+ ffa_notifications_info_get_lists_count(*ret),
+ ffa_notifications_info_get_more_pending(*ret));
+ dump_ffa_value(*ret);
+ return false;
+ }
+
+ for (uint32_t i = 0; i < lists_count; i++) {
+ uint32_t cur_size =
+ ffa_notifications_info_get_list_size(*ret,
+ i + 1);
+
+ if (lists_sizes[i] != cur_size) {
+ ERROR("Expected list size[%u] %u != %u\n", i,
+ lists_sizes[i], cur_size);
+ return false;
+ }
+ }
+
+ /* Compare the IDs list */
+ if (memcmp(&ret->arg3, ids, sizeof(ids[0]) * max_ids_count) != 0) {
+ ERROR("List of IDs not as expected\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Helper to bind a notification and set it.
+ * If the receiver is an SP, it requests the SP to perform the bind; otherwise
+ * it invokes FFA_NOTIFICATION_BIND.
+ * If the sender is an SP, it requests it to perform the set; otherwise it
+ * invokes FFA_NOTIFICATION_SET.
+ */
+static bool notification_bind_and_set(ffa_id_t sender,
+ ffa_id_t receiver, ffa_notification_bitmap_t notifications, uint32_t flags)
+{
+ struct ffa_value ret;
+ uint32_t flags_bind = flags & FFA_NOTIFICATIONS_FLAG_PER_VCPU;
+
+ /* Receiver binds notifications to sender. */
+ if (!IS_SP_ID(receiver)) {
+ ret = ffa_notification_bind(sender, receiver,
+ flags_bind, notifications);
+
+ if (is_ffa_call_error(ret)) {
+ return false;
+ }
+ } else {
+ if (!request_notification_bind(receiver, receiver, sender,
+ notifications, flags_bind,
+ CACTUS_SUCCESS,
+ 0)) {
+ return false;
+ }
+ }
+
+ return notification_set(receiver, sender, flags, notifications);
+}
+
+/**
+ * Helper to get the receiver's pending notifications and validate the return.
+ * If the receiver is an SP, the SP is requested to do the get; otherwise
+ * FFA_NOTIFICATION_GET is invoked directly.
+ */
+static bool notification_get_and_validate(
+ ffa_id_t receiver, ffa_notification_bitmap_t exp_from_sp,
+ ffa_notification_bitmap_t exp_from_vm, uint32_t vcpu_id,
+ uint32_t flags, bool check_npi_handled)
+{
+ struct ffa_value ret;
+
+ /* Receiver gets pending notifications. */
+ if (IS_SP_ID(receiver)) {
+ request_notification_get(receiver, receiver, vcpu_id, flags,
+ check_npi_handled, &ret);
+ } else {
+ ret = ffa_notification_get(receiver, vcpu_id, flags);
+ }
+
+ return is_notifications_get_as_expected(&ret, exp_from_sp, exp_from_vm,
+ receiver);
+}
+
+static bool notifications_info_get(
+ uint16_t *expected_ids, uint32_t expected_lists_count,
+ uint32_t *expected_lists_sizes, const uint32_t max_ids_count,
+ bool expected_more_pending)
+{
+ struct ffa_value ret;
+
+ VERBOSE("Getting pending notification's info.\n");
+
+ ret = ffa_notification_info_get();
+
+ return !is_ffa_call_error(ret) &&
+ is_notifications_info_get_as_expected(&ret, expected_ids,
+ expected_lists_sizes,
+ max_ids_count,
+ expected_lists_count,
+ expected_more_pending);
+}
+
+static volatile int schedule_receiver_interrupt_received;
+
+static int schedule_receiver_interrupt_handler(void *data)
+{
+ assert(schedule_receiver_interrupt_received == 0);
+ schedule_receiver_interrupt_received = 1;
+ return 0;
+}
+
+/**
+ * Enable the Schedule Receiver Interrupt and register the respective
+ * handler.
+ */
+static void schedule_receiver_interrupt_init(void)
+{
+ tftf_irq_register_handler(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID,
+ schedule_receiver_interrupt_handler);
+
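+ /*
+ * The second argument of tftf_irq_enable is the interrupt priority;
+ * 0xA is assumed to be an arbitrary valid priority for this test.
+ */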
+ tftf_irq_enable(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID, 0xA);
+}
+
+/**
+ * Disable the Schedule Receiver Interrupt and unregister the respective
+ * handler.
+ */
+static void schedule_receiver_interrupt_deinit(void)
+{
+ tftf_irq_disable(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+ tftf_irq_unregister_handler(FFA_SCHEDULE_RECEIVER_INTERRUPT_ID);
+ schedule_receiver_interrupt_received = 0;
+}
+
+bool check_schedule_receiver_interrupt_handled(void)
+{
+ if (schedule_receiver_interrupt_received == 1) {
+ VERBOSE("Schedule Receiver Interrupt handled!\n");
+ schedule_receiver_interrupt_received = 0;
+ return true;
+ }
+ VERBOSE("Schedule Receiver Interrupt NOT handled!\n");
+ return false;
+}
+
+/**
+ * Base function to test notifications signaling with an SP as a receiver.
+ */
+static test_result_t base_test_global_notifications_signal_sp(
+ const ffa_id_t sender, const ffa_id_t receiver,
+ const ffa_notification_bitmap_t notifications, const uint32_t flags_get)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ if (!IS_SP_ID(receiver)) {
+ ERROR("Receiver is expected to be an SP ID!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+ uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+ uint32_t lists_count;
+ uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+ schedule_receiver_interrupt_init();
+
+ if (!notification_bind_and_set(sender, receiver, notifications,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /**
+ * Simple list of IDs expected on return from FFA_NOTIFICATION_INFO_GET.
+ */
+ ids[0] = receiver;
+ lists_count = 1;
+
+ if (!notifications_info_get(ids, lists_count, lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ false)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notification_get_and_validate(
+ receiver, IS_SP_ID(sender) ? notifications : 0,
+ !IS_SP_ID(sender) ? notifications : 0, 0, flags_get, true)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!request_notification_unbind(receiver, receiver, sender,
+ notifications, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test to validate a VM can signal an SP.
+ */
+test_result_t test_ffa_notifications_vm_signals_sp(void)
+{
+ return base_test_global_notifications_signal_sp(
+ 1, SP_ID(1), FFA_NOTIFICATION(1) | FFA_NOTIFICATION(60),
+ FFA_NOTIFICATIONS_FLAG_BITMAP_VM);
+}
+
+/**
+ * Test to validate an SP can signal an SP.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp(void)
+{
+ return base_test_global_notifications_signal_sp(
+ SP_ID(1), SP_ID(2), g_notifications,
+ FFA_NOTIFICATIONS_FLAG_BITMAP_SP);
+}
+
+/**
+ * Test to validate an SP can signal a VM.
+ */
+test_result_t test_ffa_notifications_sp_signals_vm(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t sender = SP_ID(1);
+ const ffa_id_t receiver = VM_ID(1);
+ uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+ struct ffa_value ret;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ /* Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+ uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+ uint32_t lists_count;
+ uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+ /* Ask SPMC to allocate notifications bitmap. */
+ if (!notifications_bitmap_create(receiver, 1)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_init();
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!notification_bind_and_set(sender, receiver, g_notifications,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * FFA_NOTIFICATION_INFO_GET return list should be simple, containing
+ * only the receiver's ID.
+ */
+ ids[0] = receiver;
+ lists_count = 1;
+
+ if (!notifications_info_get(ids, lists_count, lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Get pending notifications, and retrieve response. */
+ if (!notification_get_and_validate(receiver, g_notifications, 0, 0,
+ get_flags, false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ ret = ffa_notification_unbind(sender, receiver, g_notifications);
+
+ if (!is_expected_ffa_return(ret, FFA_SUCCESS_SMC32)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ if (!notifications_bitmap_destroy(receiver)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return result;
+}
+
+/**
+ * Test to validate it is not possible to unbind a pending notification.
+ */
+test_result_t test_ffa_notifications_unbind_pending(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t receiver = SP_ID(1);
+ const ffa_id_t sender = VM_ID(1);
+ const ffa_notification_bitmap_t notifications = FFA_NOTIFICATION(30) |
+ FFA_NOTIFICATION(35);
+ uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_VM;
+
+ schedule_receiver_interrupt_init();
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!notification_bind_and_set(sender, receiver, notifications, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Attempt to unbind the pending notification, but expect error return
+ * given the notification is pending.
+ */
+ if (!request_notification_unbind(receiver, receiver, sender,
+ FFA_NOTIFICATION(30),
+ CACTUS_ERROR, FFA_ERROR_DENIED)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Request the receiver partition to get pending notifications from VMs.
+ * Both notifications 30 and 35 are expected to still be pending.
+ */
+ if (!notification_get_and_validate(receiver, 0, notifications, 0,
+ get_flags, false)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Unbind all notifications, to not interfere with other tests. */
+ if (!request_notification_unbind(receiver, receiver, sender,
+ notifications, CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test the result of a call to FFA_NOTIFICATION_INFO_GET if no pending
+ * notifications.
+ */
+test_result_t test_ffa_notifications_info_get_none(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ if (check_spmc_execution_level()) {
+ VERBOSE("OPTEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ struct ffa_value ret;
+
+ ret = ffa_notification_info_get();
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_NO_DATA)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * CPU_ON handler for testing per-vCPU notifications to SPs (either from VMs
+ * or from SPs). It requests the SP to retrieve its pending notifications
+ * within its current Execution Context. The SP shall obtain all per-vCPU
+ * notifications targeted to the running vCPU.
+ */
+static test_result_t request_notification_get_per_vcpu_on_handler(void)
+{
+ unsigned int core_pos = get_current_core_id();
+ test_result_t result = TEST_RESULT_FAIL;
+
+ uint64_t exp_from_vm = 0;
+ uint64_t exp_from_sp = 0;
+
+ if (IS_SP_ID(per_vcpu_sender)) {
+ exp_from_sp = FFA_NOTIFICATION(core_pos);
+ } else {
+ exp_from_vm = FFA_NOTIFICATION(core_pos);
+ }
+
+ VERBOSE("Request get per-vCPU notification to %x, core: %u.\n",
+ per_vcpu_receiver, core_pos);
+
+ /*
+ * Secure Partitions' secondary ECs need one round of ffa_run to reach
+ * the message loop.
+ */
+ if (!spm_core_sp_init(per_vcpu_receiver)) {
+ goto out;
+ }
+
+ /*
+ * Request to get notifications sent to the respective vCPU.
+ * Check also if NPI was handled by the receiver. It should have been
+ * pended at notifications set, in the respective vCPU.
+ */
+ if (!notification_get_and_validate(
+ per_vcpu_receiver, exp_from_sp, exp_from_vm, core_pos,
+ per_vcpu_flags_get, true)) {
+ goto out;
+ }
+
+ result = TEST_RESULT_SUCCESS;
+
+out:
+ /* Tell the lead CPU that the calling CPU has completed the test. */
+ tftf_send_event(&per_vcpu_finished[core_pos]);
+
+ return result;
+}
+
+static test_result_t base_npi_enable_per_cpu(bool enable)
+{
+ test_result_t result = TEST_RESULT_FAIL;
+ uint32_t core_pos = get_current_core_id();
+
+ VERBOSE("Request SP %x to enable NPI in core %u\n",
+ per_vcpu_receiver, core_pos);
+
+ /*
+ * Secure Partitions' secondary ECs need one round of ffa_run to reach
+ * the message loop.
+ */
+ if (!spm_core_sp_init(per_vcpu_receiver)) {
+ goto out;
+ }
+
+ result = TEST_RESULT_SUCCESS;
+
+out:
+ /* Tell the lead CPU that the calling CPU has completed the test. */
+ tftf_send_event(&per_vcpu_finished[core_pos]);
+
+ return result;
+}
+
+static test_result_t npi_enable_per_vcpu_on_handler(void)
+{
+ return base_npi_enable_per_cpu(true);
+}
+
+static test_result_t npi_disable_per_vcpu_on_handler(void)
+{
+ return base_npi_enable_per_cpu(false);
+}
+
+/**
+ * Base function to test signaling of per-vCPU notifications.
+ * Tests the whole flow between two FF-A endpoints: binding, getting notification
+ * info, and getting pending notifications.
+ * Each vCPU will receive a notification whose ID is the same as the core
+ * position.
+ */
+static test_result_t base_test_per_vcpu_notifications(ffa_id_t sender,
+ ffa_id_t receiver)
+{
+ /*
+ * Manually set variables to validate what should be the return of
+ * FFA_NOTIFICATION_INFO_GET.
+ */
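+ /*
+ * Expected packing, assuming PLATFORM_CORE_COUNT is 8: each list starts
+ * with the receiver's endpoint ID followed by up to three vCPU IDs, and
+ * each entry in 'exp_lists_sizes' counts only the vCPU IDs in its list.
+ */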
+ uint16_t exp_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+ receiver, 0, 1, 2,
+ receiver, 3, 4, 5,
+ receiver, 6, 7, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ };
+ uint32_t exp_lists_count = 3;
+ uint32_t exp_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+ 3, 3, 2, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
+
+ const bool exp_more_notif_pending = false;
+ test_result_t result = TEST_RESULT_SUCCESS;
+ uint64_t notifications_to_unbind = 0;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ per_vcpu_flags_get = IS_SP_ID(sender)
+ ? FFA_NOTIFICATIONS_FLAG_BITMAP_SP
+ : FFA_NOTIFICATIONS_FLAG_BITMAP_VM;
+
+ /* Setting global variables to be accessed by the cpu_on handler. */
+ per_vcpu_receiver = receiver;
+ per_vcpu_sender = sender;
+
+ /* Boot all cores and enable the NPI in all of them. */
+ if (spm_run_multi_core_test(
+ (uintptr_t)npi_enable_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Prepare notifications bitmap to request Cactus to bind them as
+ * per-vCPU.
+ */
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ notifications_to_unbind |= FFA_NOTIFICATION(i);
+
+ uint32_t flags = FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID((uint16_t)i);
+
+ if (!notification_bind_and_set(sender,
+ receiver,
+ FFA_NOTIFICATION(i),
+ flags)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+ if (!notifications_info_get(exp_ids, exp_lists_count, exp_lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ exp_more_notif_pending)) {
+ ERROR("Info Get Failed....\n");
+ result = TEST_RESULT_FAIL;
+ goto out;
+ }
+
+ /*
+ * Request SP to get notifications in core 0, as this is not iterated
+ * at the CPU ON handler.
+ * Set `check_npi_handled` to true, as the receiver is supposed to be
+ * preempted by the NPI.
+ */
+ if (!notification_get_and_validate(
+ receiver, IS_SP_ID(sender) ? FFA_NOTIFICATION(0) : 0,
+ !IS_SP_ID(sender) ? FFA_NOTIFICATION(0) : 0, 0,
+ per_vcpu_flags_get, true)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Bring up all the cores, and request the receiver to get notifications
+ * in each one of them.
+ */
+ if (spm_run_multi_core_test(
+ (uintptr_t)request_notification_get_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ result = TEST_RESULT_FAIL;
+ }
+
+out:
+ /* As a clean-up, unbind notifications. */
+ if (!request_notification_unbind(receiver, receiver,
+ sender,
+ notifications_to_unbind,
+ CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Boot all cores and DISABLE the NPI in all of them. */
+ if (spm_run_multi_core_test(
+ (uintptr_t)npi_disable_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return result;
+}
+
+/**
+ * Test to validate a VM can signal a per-vCPU notification to an SP.
+ */
+test_result_t test_ffa_notifications_vm_signals_sp_per_vcpu(void)
+{
+ return base_test_per_vcpu_notifications(0, SP_ID(1));
+}
+
+/**
+ * Test to validate an SP can signal a per-vCPU notification to an SP.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_per_vcpu(void)
+{
+ return base_test_per_vcpu_notifications(SP_ID(1), SP_ID(2));
+}
+
+static test_result_t notification_get_per_vcpu_on_handler(void)
+{
+ unsigned int core_pos = get_current_core_id();
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ VERBOSE("Getting per-vCPU notifications from %x, core: %u.\n",
+ per_vcpu_receiver, core_pos);
+
+ if (!spm_core_sp_init(per_vcpu_sender)) {
+ goto out;
+ }
+
+ if (!notification_get_and_validate(per_vcpu_receiver,
+ FFA_NOTIFICATION(core_pos), 0,
+ core_pos,
+ FFA_NOTIFICATIONS_FLAG_BITMAP_SP,
+ false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+out:
+ /* Tell the lead CPU that the calling CPU has completed the test. */
+ tftf_send_event(&per_vcpu_finished[core_pos]);
+
+ return result;
+}
+
+/**
+ * Test the whole flow of signaling notifications from an SP to a VM:
+ * binding, getting the notifications' info, and getting the pending
+ * notifications. Each vCPU receives a notification whose ID matches its
+ * core position.
+ */
+test_result_t test_ffa_notifications_sp_signals_vm_per_vcpu(void)
+{
+ /* Making a VM the receiver, and an SP the sender */
+ per_vcpu_receiver = VM_ID(1);
+ per_vcpu_sender = SP_ID(2);
+
+ /*
+ * Manually set the variables holding the expected return of
+ * FFA_NOTIFICATION_INFO_GET.
+ */
+ uint16_t exp_ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+ per_vcpu_receiver, 0, 1, 2,
+ per_vcpu_receiver, 3, 4, 5,
+ per_vcpu_receiver, 6, 7, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ };
+ uint32_t exp_lists_count = 3;
+ uint32_t exp_lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {
+ 3, 3, 2, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ };
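+ /*
+ * Each list returned by FFA_NOTIFICATION_INFO_GET holds the endpoint ID
+ * followed by up to three vCPU IDs. With one per-vCPU notification
+ * pending per core (assuming an 8-core platform such as the base FVP),
+ * this yields three lists of sizes 3, 3 and 2.
+ */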
+
+ const bool exp_more_notif_pending = false;
+ test_result_t result = TEST_RESULT_SUCCESS;
+ uint64_t notifications_to_unbind = 0;
+ struct ffa_value ret;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Create bitmap for receiver. */
+ if (!notifications_bitmap_create(per_vcpu_receiver,
+ PLATFORM_CORE_COUNT)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Bind notifications, and request Cactus SP to set them. */
+ for (uint32_t i = 0U; i < PLATFORM_CORE_COUNT; i++) {
+ notifications_to_unbind |= FFA_NOTIFICATION(i);
+
+ uint32_t flags = FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID((uint16_t)i);
+
+ if (!notification_bind_and_set(per_vcpu_sender,
+ per_vcpu_receiver,
+ FFA_NOTIFICATION(i),
+ flags)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ /* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+ if (!notifications_info_get(exp_ids, exp_lists_count, exp_lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ exp_more_notif_pending)) {
+ ERROR("Info Get Failed....\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Get notifications on core 0, as core 0 is not covered by the CPU ON
+ * handler.
+ */
+ if (!notification_get_and_validate(per_vcpu_receiver,
+ FFA_NOTIFICATION(0), 0, 0,
+ FFA_NOTIFICATIONS_FLAG_BITMAP_SP,
+ false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Bring up all the cores, and get notifications in each one of them. */
+ if (spm_run_multi_core_test(
+ (uintptr_t)notification_get_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ ERROR("Failed to get per-vCPU notifications\n");
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* As a clean-up, unbind notifications. */
+ ret = ffa_notification_unbind(per_vcpu_sender, per_vcpu_receiver,
+ notifications_to_unbind);
+ if (is_ffa_call_error(ret)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ return result;
+}
+
+/**
+ * Test to validate behavior in the SWd if the SRI is not delayed. If the
+ * notification setter handles a managed exit, it indicates the SRI was
+ * sent immediately.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_immediate_sri(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t sender = SP_ID(1);
+ const ffa_id_t receiver = SP_ID(2);
+ uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+ struct ffa_value ret;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ /** Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+ uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+ uint32_t lists_count;
+ uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+ ids[0] = receiver;
+ lists_count = 1;
+
+ schedule_receiver_interrupt_init();
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!request_notification_bind(receiver, receiver, sender,
+ g_notifications, 0, CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Request sender to set notification, and expect the response is
+ * MANAGED_EXIT_INTERRUPT_ID.
+ */
+ if (!request_notification_set(sender, receiver, sender, 0,
+ g_notifications, 0,
+ MANAGED_EXIT_INTERRUPT_ID, 0)) {
+ ERROR("SRI not handled immediately!\n");
+ result = TEST_RESULT_FAIL;
+ } else {
+ VERBOSE("SP %x did a managed exit.\n", sender);
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+ if (!notifications_info_get(ids, lists_count, lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Validate notification get. */
+ if (!request_notification_get(receiver, receiver, 0, get_flags, false, &ret) ||
+ !is_notifications_get_as_expected(&ret, g_notifications, 0,
+ receiver)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Resume setter Cactus in the handling of CACTUS_NOTIFICATIONS_SET_CMD.
+ */
+ ret = cactus_resume_after_managed_exit(HYP_ID, sender);
+
+ /* Expected result to CACTUS_NOTIFICATIONS_SET_CMD. */
+ if (!is_expected_cactus_response(ret, CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Unbind for clean-up. */
+ if (!request_notification_unbind(receiver, receiver, sender,
+ g_notifications, CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return result;
+}
+
+/**
+ * Test to validate behavior in SWd if the SRI is delayed.
+ */
+test_result_t test_ffa_notifications_sp_signals_sp_delayed_sri(void)
+{
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+ const ffa_id_t sender = SP_ID(3);
+ const ffa_id_t receiver = SP_ID(2);
+ const ffa_id_t echo_dest = SP_ID(1);
+ uint32_t echo_dest_cmd_count = 0;
+ uint32_t get_flags = FFA_NOTIFICATIONS_FLAG_BITMAP_SP;
+ struct ffa_value ret;
+ test_result_t result = TEST_RESULT_SUCCESS;
+
+ /** Variables to validate calls to FFA_NOTIFICATION_INFO_GET. */
+ uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+ uint32_t lists_count;
+ uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
+
+ ids[0] = receiver;
+ lists_count = 1;
+
+ schedule_receiver_interrupt_init();
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!request_notification_bind(receiver, receiver, sender,
+ g_notifications, 0, CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ ret = cactus_get_req_count_send_cmd(HYP_ID, echo_dest);
+
+ if (cactus_get_response(ret) == CACTUS_SUCCESS) {
+ /*
+ * Save the command count from the echo_dest, to validate it
+ * has been incremented after the request to set notifications.
+ */
+ echo_dest_cmd_count = cactus_get_req_count(ret);
+ VERBOSE("Partition %x command count %u.\n", echo_dest,
+ echo_dest_cmd_count);
+ } else {
+ VERBOSE("Failed to get cmds count from %u\n", echo_dest);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Request sender to set notification with Delay SRI flag, and specify
+ * echo destination.
+ */
+ if (!request_notification_set(sender, receiver, sender,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI,
+ g_notifications, echo_dest,
+ CACTUS_SUCCESS, 0)) {
+ VERBOSE("Failed to set notifications!\n");
+ result = TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Get command count again from echo_dest, to validate that it has been
+ * incremented by one. This should indicate the notification setter has
+ * issued a request to echo_dest right after the notification set, thus
+ * proving the SRI hasn't been sent right after FFA_NOTIFICATION_SET.
+ */
+ ret = cactus_get_req_count_send_cmd(HYP_ID, echo_dest);
+ if (cactus_get_response(ret) == CACTUS_SUCCESS) {
+ if (cactus_get_req_count(ret) == echo_dest_cmd_count + 1) {
+ VERBOSE("SRI successfully delayed.\n");
+ } else {
+ VERBOSE("Failed to get cmds count from %u.\n",
+ echo_dest);
+ result = TEST_RESULT_FAIL;
+ }
+ } else {
+ VERBOSE("Failed to get cmds count from %x\n", echo_dest);
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Call FFA_NOTIFICATION_INFO_GET and validate return. */
+ if (!notifications_info_get(ids, lists_count, lists_sizes,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
+ false)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Validate notification get. */
+ if (!request_notification_get(receiver, receiver, 0, get_flags, false, &ret) ||
+ !is_notifications_get_as_expected(&ret, g_notifications, 0,
+ receiver)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ /* Unbind for clean-up. */
+ if (!request_notification_unbind(receiver, receiver, sender,
+ g_notifications, CACTUS_SUCCESS, 0)) {
+ result = TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return result;
+}
+
+test_result_t notifications_set_per_vcpu_on_handler(void)
+{
+ unsigned int core_pos = get_current_core_id();
+ test_result_t result = TEST_RESULT_FAIL;
+
+ if (!spm_core_sp_init(per_vcpu_sender)) {
+ goto out;
+ }
+
+ if (!notification_set(per_vcpu_receiver, per_vcpu_sender,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID(0),
+ FFA_NOTIFICATION(core_pos))) {
+ goto out;
+ }
+
+ result = TEST_RESULT_SUCCESS;
+
+out:
+ /* Tell the lead CPU that the calling CPU has completed the test. */
+ tftf_send_event(&per_vcpu_finished[core_pos]);
+
+ return result;
+}
+
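+/**
+ * Test that an MP SP can signal per-vCPU notifications to a UP SP. Each core
+ * signals one notification towards the single execution context of the
+ * receiver, which then retrieves all of them from its only vCPU.
+ */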
+test_result_t test_ffa_notifications_mp_sp_signals_up_sp(void)
+{
+ ffa_notification_bitmap_t to_bind = 0;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Setting per-vCPU sender and receiver IDs. */
+ per_vcpu_sender = SP_ID(2); /* MP SP */
+ per_vcpu_receiver = SP_ID(3); /* UP SP */
+
+ schedule_receiver_interrupt_init();
+
+ /* Prepare notifications bitmap to have one bit per platform core. */
+ for (uint32_t i = 0; i < PLATFORM_CORE_COUNT; i++) {
+ to_bind |= FFA_NOTIFICATION(i);
+ }
+
+ /* Request receiver to bind a set of notifications to the sender. */
+ if (!request_notification_bind(per_vcpu_receiver, per_vcpu_receiver,
+ per_vcpu_sender, to_bind,
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU,
+ CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Boot up the system, and then request the sender to signal a notification
+ * from every core into the receiver's only vCPU, with delayed SRI.
+ */
+ if (!notification_set(per_vcpu_receiver, per_vcpu_sender,
+ FFA_NOTIFICATIONS_FLAG_DELAY_SRI |
+ FFA_NOTIFICATIONS_FLAG_PER_VCPU |
+ FFA_NOTIFICATIONS_FLAGS_VCPU_ID(0),
+ FFA_NOTIFICATION(0))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (spm_run_multi_core_test(
+ (uintptr_t)notifications_set_per_vcpu_on_handler,
+ per_vcpu_finished) != TEST_RESULT_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!check_schedule_receiver_interrupt_handled()) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (!notification_get_and_validate(per_vcpu_receiver, to_bind, 0, 0,
+ FFA_NOTIFICATIONS_FLAG_BITMAP_SP, true)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Request unbind. */
+ if (!request_notification_unbind(per_vcpu_receiver, per_vcpu_receiver,
+ per_vcpu_sender, to_bind,
+ CACTUS_SUCCESS, 0)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ schedule_receiver_interrupt_deinit();
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c b/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c
deleted file mode 100644
index 1b47c5f99..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_rxtx_map.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <debug.h>
-
-#include <test_helpers.h>
-#include <xlat_tables_defs.h>
-
-static struct mailbox_buffers mb;
-
-static test_result_t test_ffa_rxtx_map(uint32_t expected_return)
-{
- smc_ret_values ret;
-
- /**********************************************************************
- * Verify that FFA is there and that it has the correct version.
- **********************************************************************/
- SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
-
- /**********************************************************************
- * If OP-TEE is SPMC skip this test.
- **********************************************************************/
- if (check_spmc_execution_level()) {
- VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
- return TEST_RESULT_SKIPPED;
- }
-
- /*
- * Declare RXTX buffers, assign them to the mailbox and call
- * FFA_RXTX_MAP.
- */
- CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
- if (ffa_func_id(ret) != expected_return) {
- ERROR("Failed to map RXTX buffers %x!\n", ffa_error_code(ret));
- return TEST_RESULT_FAIL;
- }
-
- return TEST_RESULT_SUCCESS;
-}
-
-/**
- * Test mapping RXTX buffers from NWd.
- * This test also sets the Mailbox for other SPM related tests that need to use
- * RXTX buffers.
- */
-test_result_t test_ffa_rxtx_map_success(void)
-{
- test_result_t ret = test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
-
- if (ret == TEST_RESULT_SUCCESS) {
- INFO("Set RXTX Mailbox for remaining spm tests!\n");
- set_tftf_mailbox(&mb);
- }
- return ret;
-}
-
-/**
- * Test to verify that 2nd call to FFA_RXTX_MAP should fail.
- */
-test_result_t test_ffa_rxtx_map_fail(void)
-{
- INFO("This test expects error log.\n");
- return test_ffa_rxtx_map(FFA_ERROR);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c b/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c
new file mode 100644
index 000000000..40e52c94a
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_secure_interrupts.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <mmio.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <timer.h>
+
+#include <drivers/arm/arm_gic.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+#define RECEIVER_2 SP_ID(2)
+#define SP_SLEEP_TIME 1000U
+#define NS_TIME_SLEEP 1500U
+#define ECHO_VAL1 U(0xa0a0a0a0)
+
+static const struct ffa_uuid expected_sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}
+ };
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in RUNNING state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a command to SP to first sleep (by executing a busy loop), then
+ * restart trusted watchdog timer and then sleep again.
+ *
+ * 3. While SP is running the first busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt will be trapped to SPM as IRQ. SPM will inject the virtual
+ * IRQ to the first SP through vIRQ conduit and perform eret to resume
+ * execution in SP.
+ *
+ * 5. Execution traps to irq handler of Cactus SP. It will handle the secure
+ * interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. Cactus SP will perform End-Of-Interrupt and resume execution in the busy
+ * loop.
+ *
+ * 7. Trusted watchdog timer will trigger once again followed by steps 4 to 6.
+ *
+ * 8. Cactus SP will send a direct response message with the elapsed time back
+ * to the normal world.
+ *
+ * 9. We make sure the time elapsed in the sleep routine by SP is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ */
+
+test_result_t test_ffa_sec_interrupt_sp_running(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 50);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to first Cactus SP to sleep */
+ ret_values = cactus_sleep_trigger_wdog_cmd(SENDER, RECEIVER, SP_SLEEP_TIME, 50);
+
+ /*
+ * Secure interrupt should trigger during this time, Cactus
+ * will handle the trusted watchdog timer.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for sleep command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Secure interrupt has preempted execution: %u\n",
+ cactus_get_response(ret_values));
+
+ /* Make sure elapsed time not less than sleep time */
+ if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while Secure Partition is waiting
+ * for a message.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Once the SP returns with a direct response message, it moves to WAITING
+ * state.
+ *
+ * 3. Execute a busy loop to sleep for NS_TIME_SLEEP ms.
+ *
+ * 4. Trusted watchdog timer expires during this time which leads to secure
+ * interrupt being triggered while cpu is executing in normal world.
+ *
+ * 5. The interrupt is trapped to BL31/SPMD as FIQ and later synchronously
+ * delivered to SPM.
+ *
+ * 6. SPM injects a virtual IRQ to first Cactus Secure Partition.
+ *
+ * 7. Once the SP handles the interrupt, it returns execution back to normal
+ * world using FFA_MSG_WAIT call.
+ *
+ * 8. SPM, through the help of SPMD, resumes execution in normal world to
+ * continue the busy loop.
+ *
+ * 9. We make sure the time elapsed in the sleep routine is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ *
+ */
+test_result_t test_ffa_sec_interrupt_sp_waiting(void)
+{
+ uint64_t time1;
+ volatile uint64_t time2, time_lapsed;
+ uint64_t timer_freq = read_cntfrq_el0();
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send a message to SP1 through direct messaging.
+ */
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ time1 = syscounter_read();
+
+ /*
+ * Sleep for NS_TIME_SLEEP ms. This ensures secure wdog timer triggers during this
+ * time. We explicitly do not use tftf_timer_sleep();
+ */
+ waitms(NS_TIME_SLEEP);
+ time2 = syscounter_read();
+
+ /* Lapsed time should be at least equal to sleep time */
+ time_lapsed = ((time2 - time1) * 1000) / timer_freq;
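+ /* time_lapsed is in ms: the system counter runs at timer_freq (CNTFRQ_EL0) ticks per second. */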
+
+ if (time_lapsed < NS_TIME_SLEEP) {
+ ERROR("Time elapsed less than expected value: %llu vs %u\n",
+ time_lapsed, NS_TIME_SLEEP);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in BLOCKED state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a direct request to first SP to forward sleep command to second SP.
+ *
+ * 3. While second SP is running the busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt will be trapped to SPM as IRQ. SPM will inject the virtual
+ * IRQ to the first SP through vIRQ conduit and perform eret to resume
+ * execution in first SP.
+ *
+ * 5. Execution traps to irq handler of Cactus SP. It will handle the secure
+ * interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. First SP performs EOI by calling interrupt deactivate ABI and invokes
+ * FFA_RUN to resume second SP in the busy loop.
+ *
+ * 7. Second SP will complete the busy sleep loop and send a direct response
+ * message with the elapsed time back to the first SP.
+ *
+ * 8. First SP checks for the elapsed time and sends a direct response with
+ * a SUCCESS value back to tftf.
+ *
+ * 9. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 10. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 11. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ */
+test_result_t test_ffa_sec_interrupt_sp_blocked(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+ * Send request to first Cactus SP to send request to Second Cactus
+ * SP to sleep
+ */
+ ret_values = cactus_fwd_sleep_cmd(SENDER, RECEIVER, RECEIVER_2,
+ SP_SLEEP_TIME, false);
+
+ /*
+ * Secure interrupt should trigger during this time, Cactus
+ * will handle the trusted watchdog timer.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test secure interrupt handling while first Secure Partition is
+ * in WAITING state while the second Secure Partition is running.
+ *
+ * 1. Send a direct message request command to first Cactus SP to start the
+ * trusted watchdog timer.
+ *
+ * 2. Send a direct request to second SP to sleep by executing a busy loop.
+ *
+ * 3. While second SP is running the busy loop, Secure interrupt should trigger
+ * during this time.
+ *
+ * 4. The interrupt is trapped to the SPM as a physical IRQ. The SPM injects a
+ * virtual IRQ to the first SP and resumes it while it is in waiting state.
+ *
+ * 5. Execution traps to irq handler of the first Cactus SP. It will handle the
+ * secure interrupt triggered by the trusted watchdog timer.
+ *
+ * 6. Cactus SP will perform End-Of-Interrupt by calling the interrupt
+ * deactivate HVC and invoke FFA_MSG_WAIT ABI to perform interrupt signal
+ * completion.
+ *
+ * 7. SPM then resumes the second SP which was preempted by secure interrupt.
+ *
+ * 8. Second SP will complete the busy sleep loop and send a direct response
+ * message with the elapsed time back to the first SP.
+ *
+ * 9. We make sure the time elapsed in the sleep routine by SP is not less than
+ * the requested value.
+ *
+ * 10. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 11. Further, TFTF expects SP to return the ID of Trusted Watchdog timer
+ * interrupt through a direct response message.
+ *
+ * 12. Test finishes successfully once the TFTF disables the trusted watchdog
+ * interrupt through a direct message request command.
+ */
+test_result_t test_ffa_sec_interrupt_sp1_waiting_sp2_running(void)
+{
+ struct ffa_value ret_values;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable trusted watchdog interrupt as IRQ in the secure side. */
+ if (!enable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret_values = cactus_send_twdog_cmd(SENDER, RECEIVER, 100);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for starting TWDOG timer\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Send request to Second Cactus SP to sleep. */
+ ret_values = cactus_sleep_cmd(SENDER, RECEIVER_2, SP_SLEEP_TIME);
+
+ /*
+ * Secure interrupt should trigger during this time, Cactus
+ * will handle the trusted watchdog timer.
+ */
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for sleep command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure elapsed time not less than sleep time. */
+ if (cactus_get_response(ret_values) < SP_SLEEP_TIME) {
+ ERROR("Lapsed time less than requested sleep time\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure Trusted Watchdog timer interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_TWDOG_INTID) {
+ ERROR("Trusted watchdog timer interrupt not serviced by SP\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable Trusted Watchdog interrupt. */
+ if (!disable_trusted_wdog_interrupt(SENDER, RECEIVER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * @Test_Aim@ Test handling of interrupt belonging to the extended SPI range
+ * while first Secure Partition is in RUNNING state.
+ *
+ * 1. Send a direct message request command to first Cactus SP to trigger the
+ * eSPI interrupt.
+ *
+ * 2. The Cactus SP either successfully handles the interrupt or fails to do
+ * so. It sends a value through direct message response indicating if the
+ * interrupt was handled.
+ *
+ * 3. TFTF sends a direct request message to SP to query the ID of last serviced
+ * secure virtual interrupt.
+ *
+ * 4. Further, TFTF expects SP to return the appropriate interrupt id through a
+ * direct response message.
+ */
+test_result_t test_ffa_espi_sec_interrupt(void)
+{
+ struct ffa_value ret_values;
+
+ /* Check if extended SPI range is implemented by GIC. */
+ if (!arm_gic_is_espi_supported()) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /* Enable ESPI. */
+ ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID,
+ true, INTERRUPT_TYPE_IRQ);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while configuring"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ ERROR("Failed to configure ESPI interrupt\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Trigger ESPI while running. */
+ ret_values = cactus_trigger_espi_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID);
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while triggering"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != 1) {
+ ERROR("Interrupt %u not serviced by SP\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Disable ESPI. */
+ ret_values = cactus_interrupt_cmd(SENDER, RECEIVER, IRQ_ESPI_TEST_INTID,
+ false, INTERRUPT_TYPE_IRQ);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response message while configuring"
+ " interrupt ESPI %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret_values) != CACTUS_SUCCESS) {
+ ERROR("Failed to configure ESPI interrupt %u\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check for the last serviced secure virtual interrupt. */
+ ret_values = cactus_get_last_interrupt_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret_values)) {
+ ERROR("Expected a direct response for last serviced interrupt"
+ " command\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Make sure the eSPI interrupt was serviced. */
+ if (cactus_get_response(ret_values) != IRQ_ESPI_TEST_INTID) {
+ ERROR("ESPI interrupt %u not serviced by SP\n", IRQ_ESPI_TEST_INTID);
+ return TEST_RESULT_FAIL;
+ }
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
new file mode 100644
index 000000000..80a3015a7
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_setup_and_discovery.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2020-2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <spm_common.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+
+static bool should_skip_version_test;
+
+static struct mailbox_buffers mb;
+
+static const struct ffa_uuid sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}, {IVY_UUID}
+ };
+
+static const struct ffa_partition_info ffa_expected_partition_info[] = {
+ /* Primary partition info */
+ {
+ .id = SP_ID(1),
+ .exec_context = PRIMARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {PRIMARY_UUID}
+ },
+ /* Secondary partition info */
+ {
+ .id = SP_ID(2),
+ .exec_context = SECONDARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {SECONDARY_UUID}
+ },
+ /* Tertiary partition info */
+ {
+ .id = SP_ID(3),
+ .exec_context = TERTIARY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV |
+ FFA_PARTITION_NOTIFICATION,
+ .uuid = {TERTIARY_UUID}
+ },
+ /* Ivy partition info */
+ {
+ .id = SP_ID(4),
+ .exec_context = IVY_EXEC_CTX_COUNT,
+ .properties = FFA_PARTITION_AARCH64_EXEC |
+ FFA_PARTITION_DIRECT_REQ_RECV,
+ .uuid = {IVY_UUID}
+ }
+};
+
+/*
+ * Using FFA version expected for SPM.
+ */
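+/*
+ * An FF-A version encodes the major revision in bits [30:16] and the minor
+ * revision in bits [15:0]; bit 31 must be zero.
+ */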
+#define SPM_VERSION MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR)
+
+/******************************************************************************
+ * FF-A Features ABI Tests
+ ******************************************************************************/
+
+test_result_t test_ffa_features(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /* Check if SPMC is OP-TEE at S-EL1 */
+ if (check_spmc_execution_level()) {
+ /* FFA_FEATURES is not yet supported in OP-TEE */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ struct ffa_value ffa_ret;
+ unsigned int expected_ret;
+ const struct ffa_features_test *ffa_feature_test_target;
+ unsigned int i, test_target_size =
+ get_ffa_feature_test_target(&ffa_feature_test_target);
+ struct ffa_features_test test_target;
+
+ for (i = 0U; i < test_target_size; i++) {
+ test_target = ffa_feature_test_target[i];
+ ffa_ret = ffa_features_with_input_property(test_target.feature, test_target.param);
+ expected_ret = FFA_VERSION_COMPILED
+ >= test_target.version_added ?
+ test_target.expected_ret : FFA_ERROR;
+ if (ffa_func_id(ffa_ret) != expected_ret) {
+ tftf_testcase_printf("%s returned %x, expected %x\n",
+ test_target.test_name,
+ ffa_func_id(ffa_ret),
+ expected_ret);
+ return TEST_RESULT_FAIL;
+ }
+ if ((expected_ret == FFA_ERROR) &&
+ (ffa_error_code(ffa_ret) != FFA_ERROR_NOT_SUPPORTED)) {
+ tftf_testcase_printf("%s failed for the wrong reason: "
+ "returned %x, expected %x\n",
+ test_target.test_name,
+ ffa_error_code(ffa_ret),
+ FFA_ERROR_NOT_SUPPORTED);
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A Version ABI Tests
+ ******************************************************************************/
+
+/*
+ * Calls the FFA_VERSION ABI and checks whether the result is as expected.
+ */
+static test_result_t test_ffa_version(uint32_t input_version,
+ uint32_t expected_return)
+{
+ if (should_skip_version_test) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ struct ffa_value ret_values = ffa_version(input_version);
+
+ uint32_t spm_version = (uint32_t)(0xFFFFFFFF & ret_values.fid);
+
+ if (spm_version == expected_return) {
+ return TEST_RESULT_SUCCESS;
+ }
+
+ tftf_testcase_printf("Input Version: 0x%x\n"
+ "Return: 0x%x\nExpected: 0x%x\n",
+ input_version, spm_version, expected_return);
+
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * @Test_Aim@ Validate what happens when using same version as SPM.
+ */
+test_result_t test_ffa_version_equal(void)
+{
+ /*
+ * FFA_VERSION interface is used to check that SPM functionality is
+ * supported. On FFA_VERSION invocation from TFTF, the SPMD returns
+ * either NOT_SUPPORTED or the SPMC version value provided in the SPMC
+ * manifest. The variable "should_skip_version_test" is set to true when
+ * the SPMD returns NOT_SUPPORTED or a mismatched version, which means
+ * that the TFTF physical FF-A endpoint version (SPM_VERSION) does not
+ * match the SPMC's physical FF-A endpoint version. This prevents running
+ * the subsequent FF-A version tests (and breaking the test flow), as they're
+ * not relevant when the SPMD is not present within BL31
+ * (FFA_VERSION returns NOT_SUPPORTED).
+ */
+ test_result_t ret = test_ffa_version(SPM_VERSION, SPM_VERSION);
+
+ if (ret != TEST_RESULT_SUCCESS) {
+ should_skip_version_test = true;
+ ret = TEST_RESULT_SKIPPED;
+ }
+ return ret;
+}
+
+/*
+ * @Test_Aim@ Validate what happens when setting bit 31 in
+ * 'input_version'. As per spec, FFA version is 31 bits long.
+ * Bit 31 set is an invalid input.
+ */
+test_result_t test_ffa_version_bit31(void)
+{
+ return test_ffa_version(FFA_VERSION_BIT31_MASK | SPM_VERSION,
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*
+ * @Test_Aim@ Validate what happens for bigger version than SPM's.
+ */
+test_result_t test_ffa_version_bigger(void)
+{
+ return test_ffa_version(MAKE_FFA_VERSION(FFA_VERSION_MAJOR + 1, 0),
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/*
+ * @Test_Aim@ Validate what happens for smaller version than SPM's.
+ */
+test_result_t test_ffa_version_smaller(void)
+{
+ return test_ffa_version(MAKE_FFA_VERSION(0, 9),
+ FFA_ERROR_NOT_SUPPORTED);
+}
+
+/******************************************************************************
+ * FF-A RXTX ABI Tests
+ ******************************************************************************/
+
+static test_result_t test_ffa_rxtx_map(uint32_t expected_return)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Verify that FFA is there and that it has the correct version.
+ **********************************************************************/
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /**********************************************************************
+ * If OP-TEE is SPMC skip this test.
+ **********************************************************************/
+ if (check_spmc_execution_level()) {
+ VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /*
+ * Declare RXTX buffers, assign them to the mailbox and call
+ * FFA_RXTX_MAP.
+ */
+ CONFIGURE_AND_MAP_MAILBOX(mb, PAGE_SIZE, ret);
+ if (ffa_func_id(ret) != expected_return) {
+ ERROR("Failed to map RXTX buffers %x!\n", ffa_error_code(ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test mapping RXTX buffers from NWd.
+ */
+test_result_t test_ffa_rxtx_map_success(void)
+{
+ return test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
+}
+
+/**
+ * Test to verify that 2nd call to FFA_RXTX_MAP should fail.
+ */
+test_result_t test_ffa_rxtx_map_fail(void)
+{
+ VERBOSE("This test expects error log.\n");
+ return test_ffa_rxtx_map(FFA_ERROR);
+}
+
+static test_result_t test_ffa_rxtx_unmap(uint32_t expected_return)
+{
+ struct ffa_value ret;
+
+ /**********************************************************************
+ * Verify that FFA is there and that it has the correct version.
+ **********************************************************************/
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 0);
+
+ /**********************************************************************
+ * If OP-TEE is SPMC skip this test.
+ **********************************************************************/
+ if (check_spmc_execution_level()) {
+ VERBOSE("OP-TEE as SPMC at S-EL1. Skipping test!\n");
+ return TEST_RESULT_SKIPPED;
+ }
+
+ ret = ffa_rxtx_unmap();
+ if (!is_expected_ffa_return(ret, expected_return)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Test unmapping RXTX buffers from NWd.
+ */
+test_result_t test_ffa_rxtx_unmap_success(void)
+{
+ return test_ffa_rxtx_unmap(FFA_SUCCESS_SMC32);
+}
+
+/**
+ * Test to verify that 2nd call to FFA_RXTX_UNMAP should fail.
+ */
+test_result_t test_ffa_rxtx_unmap_fail(void)
+{
+ VERBOSE("This test expects error log.\n");
+ return test_ffa_rxtx_unmap(FFA_ERROR);
+}
+
+/**
+ * Test mapping RXTX buffers that have been previously unmapped from NWd.
+ */
+test_result_t test_ffa_rxtx_map_unmapped_success(void)
+{
+ test_result_t ret = test_ffa_rxtx_map(FFA_SUCCESS_SMC32);
+ /*
+ * Unmap the buffers so that further tests can map and use the RXTX
+ * buffers. If they were left mapped here, subsequent attempts to map
+ * the RXTX buffers would fail.
+ */
+ ffa_rxtx_unmap();
+ return ret;
+}
+
+/*
+ * The FFA_RXTX_UNMAP specification at the NS physical FF-A instance allows for
+ * an ID to be given to the SPMC. The ID should relate to a VM that had its ID
+ * previously forwarded to the SPMC.
+ * This test validates that calls to FFA_RXTX_UNMAP from the NS physical
+ * instance can't unmap the RXTX buffer pair of an SP.
+ */
+test_result_t test_ffa_rxtx_unmap_fail_if_sp(void)
+{
+ struct ffa_value ret;
+ struct ffa_value args;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+ /* Invoke FFA_RXTX_UNMAP, providing the ID of an SP in w1. */
+ args = (struct ffa_value) {
+ .fid = FFA_RXTX_UNMAP,
+ .arg1 = SP_ID(1) << 16,
+ .arg2 = FFA_PARAM_MBZ,
+ .arg3 = FFA_PARAM_MBZ,
+ .arg4 = FFA_PARAM_MBZ,
+ .arg5 = FFA_PARAM_MBZ,
+ .arg6 = FFA_PARAM_MBZ,
+ .arg7 = FFA_PARAM_MBZ
+ };
+
+ ret = ffa_service_call(&args);
+
+ if (!is_expected_ffa_error(ret, FFA_ERROR_INVALID_PARAMETER)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A SPM_ID_GET ABI Tests
+ ******************************************************************************/
+
+test_result_t test_ffa_spm_id_get(void)
+{
+ SKIP_TEST_IF_FFA_VERSION_LESS_THAN(1, 1);
+
+ struct ffa_value ffa_ret = ffa_spm_id_get();
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ERROR("FFA_SPM_ID_GET call failed! Error code: 0x%x\n",
+ ffa_error_code(ffa_ret));
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Check the SPMC value given in the fvp_spmc_manifest is returned */
+ ffa_id_t spm_id = ffa_endpoint_id(ffa_ret);
+
+ if (spm_id != SPMC_ID) {
+ ERROR("Expected SPMC_ID of 0x%x\n received: 0x%x\n",
+ SPMC_ID, spm_id);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/******************************************************************************
+ * FF-A PARTITION_INFO_GET ABI Tests
+ ******************************************************************************/
+
+/**
+ * Attempt to get the SP partition information for individual partitions as well
+ * as all secure partitions.
+ */
+test_result_t test_ffa_partition_info(void)
+{
+ /***********************************************************************
+ * Check if SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ if (!ffa_partition_info_helper(&mb, sp_uuids[0],
+ &ffa_expected_partition_info[0], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, sp_uuids[1],
+ &ffa_expected_partition_info[1], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, sp_uuids[2],
+ &ffa_expected_partition_info[2], 1)) {
+ return TEST_RESULT_FAIL;
+ }
+ if (!ffa_partition_info_helper(&mb, NULL_UUID,
+ ffa_expected_partition_info,
+ ARRAY_SIZE(ffa_expected_partition_info))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * Attempt to get v1.0 partition info descriptors.
+ */
+test_result_t test_ffa_partition_info_v1_0(void)
+{
+ /**************************************************************
+ * Check if SPMC has ffa_version and expected FFA endpoints
+ * are deployed.
+ *************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 0, sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ test_result_t result = TEST_RESULT_SUCCESS;
+ struct ffa_value ret = ffa_partition_info_get(NULL_UUID);
+ uint64_t expected_size = ARRAY_SIZE(ffa_expected_partition_info);
+
+ if (ffa_func_id(ret) == FFA_SUCCESS_SMC32) {
+ if (ffa_partition_info_count(ret) != expected_size) {
+ ERROR("Unexpected number of partitions %d\n",
+ ffa_partition_info_count(ret));
+ return TEST_RESULT_FAIL;
+ }
+ if (ffa_partition_info_desc_size(ret) !=
+ sizeof(struct ffa_partition_info_v1_0)) {
+ ERROR("Unexepcted partition info descriptor size %d\n",
+ ffa_partition_info_desc_size(ret));
+ return TEST_RESULT_FAIL;
+ }
+ const struct ffa_partition_info_v1_0 *info =
+ (const struct ffa_partition_info_v1_0 *)(mb.recv);
+
+ for (unsigned int i = 0U; i < expected_size; i++) {
+ uint32_t expected_properties_v1_0 =
+ ffa_expected_partition_info[i].properties &
+ ~FFA_PARTITION_v1_0_RES_MASK;
+
+ if (info[i].id != ffa_expected_partition_info[i].id) {
+ ERROR("Wrong ID. Expected %x, got %x\n",
+ ffa_expected_partition_info[i].id,
+ info[i].id);
+ result = TEST_RESULT_FAIL;
+ }
+ if (info[i].exec_context !=
+ ffa_expected_partition_info[i].exec_context) {
+ ERROR("Wrong context. Expected %d, got %d\n",
+ ffa_expected_partition_info[i].exec_context,
+ info[i].exec_context);
+ result = TEST_RESULT_FAIL;
+ }
+ if (info[i].properties !=
+ expected_properties_v1_0) {
+ ERROR("Wrong properties. Expected %d, got %d\n",
+ expected_properties_v1_0,
+ info[i].properties);
+ result = TEST_RESULT_FAIL;
+ }
+ }
+ }
+
+ ret = ffa_rx_release();
+ if (is_ffa_call_error(ret)) {
+ ERROR("Failed to release RX buffer\n");
+ result = TEST_RESULT_FAIL;
+ }
+ return result;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c b/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c
new file mode 100644
index 000000000..15ca712a0
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_smccc.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <debug.h>
+
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <ffa_svc.h>
+#include <runtime_services/spm_test_helpers.h>
+#include <spm_common.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <xlat_tables_defs.h>
+
+#define expect_eq(expr, value) \
+ do { \
+ if ((expr) != (value)) { \
+ ERROR("expect failed %s:%u\n", __FILE__, __LINE__); \
+ return TEST_RESULT_FAIL; \
+ } \
+ } while (0)
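+/*
+ * Note: on mismatch expect_eq() returns TEST_RESULT_FAIL from the enclosing
+ * function, so it may only be used in functions returning test_result_t.
+ */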
+
+static const struct ffa_uuid sp_uuids[] = {
+ {PRIMARY_UUID}, {SECONDARY_UUID}, {TERTIARY_UUID}, {IVY_UUID}
+ };
+
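+/*
+ * Mirrors the x0-x7 argument/result registers of an SMCCC call that uses at
+ * most 8 input/output registers.
+ */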
+struct ffa_value8 {
+ u_register_t fid;
+ u_register_t arg1;
+ u_register_t arg2;
+ u_register_t arg3;
+ u_register_t arg4;
+ u_register_t arg5;
+ u_register_t arg6;
+ u_register_t arg7;
+};
+
+/* Declared in test_ffa_smccc_asm.S. */
+uint32_t test_ffa_smc(struct ffa_value8 *);
+uint32_t test_ffa_smc_ext(struct ffa_value *);
+
+/**
+ * FF-A service calls are emitted at the NS physical FF-A instance.
+ * Such services do not return results in registers beyond x7.
+ * Check callee(s) preserve GP registers beyond x7 per SMCCCv1.2.
+ */
+test_result_t test_smccc_callee_preserved(void)
+{
+ struct ffa_value8 args;
+ struct mailbox_buffers mb;
+
+ /*
+ * Permit running the test on configurations running
+ * the S-EL2 SPMC where 4 test partitions are deployed.
+ */
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+ reset_tftf_mailbox();
+
+ /* Declare RX/TX buffers locally to the test. */
+ CONFIGURE_MAILBOX(mb, PAGE_SIZE);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_VERSION;
+ args.arg1 = 0x10001;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_VERSION_COMPILED);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_ID_GET;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RXTX_MAP_SMC64;
+ args.arg1 = (uintptr_t)mb.send;
+ args.arg2 = (uintptr_t)mb.recv;
+ args.arg3 = 1;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_PARTITION_INFO_GET;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, ARRAY_SIZE(sp_uuids));
+ expect_eq(args.arg3, sizeof(struct ffa_partition_info));
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RX_RELEASE;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ memset(&args, 0, sizeof(struct ffa_value8));
+ args.fid = FFA_RXTX_UNMAP;
+ expect_eq(test_ffa_smc(&args), 0);
+ expect_eq(args.fid, FFA_SUCCESS_SMC32);
+ expect_eq(args.arg1, 0);
+ expect_eq(args.arg2, 0);
+ expect_eq(args.arg3, 0);
+ expect_eq(args.arg4, 0);
+ expect_eq(args.arg5, 0);
+ expect_eq(args.arg6, 0);
+ expect_eq(args.arg7, 0);
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/**
+ * An FF-A service call is emitted at the NS physical FF-A instance.
+ * The service returns results in x0-x17 registers.
+ * Check callee(s) preserve GP registers beyond x17 per SMCCCv1.2.
+ */
+test_result_t test_smccc_ext_callee_preserved(void)
+{
+ struct ffa_value args_ext;
+
+ CHECK_SPMC_TESTING_SETUP(1, 1, sp_uuids);
+
+ /* Test the SMCCC extended registers range. */
+ memset(&args_ext, 0, sizeof(struct ffa_value));
+ args_ext.fid = FFA_PARTITION_INFO_GET_REGS_SMC64;
+ expect_eq(test_ffa_smc_ext(&args_ext), 0);
+ expect_eq(args_ext.fid, FFA_SUCCESS_SMC64);
+ expect_eq(args_ext.arg1, 0);
+ expect_eq(args_ext.arg2 >> 48, sizeof(struct ffa_partition_info));
+ expect_eq(args_ext.arg2 & 0xffff, ARRAY_SIZE(sp_uuids) - 1);
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S b/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S
new file mode 100644
index 000000000..00d82eeb3
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_smccc_asm.S
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+
+ .global test_ffa_smc
+ .global test_ffa_smc_ext
+
+ .section .text, "ax"
+
+/**
+ * test_ffa_smc
+ *
+ * x0 - ptr to a struct ffa_value8
+ *
+ * This function is used to test FF-A ABIs on top of SMCCCv1.2 for interfaces
+ * requiring at most 8 input/output registers.
+ * Load 8 GP input registers, move a pattern into x8-x29 and emit an SMC.
+ * On return save 8 output registers to the structure and compare x8-x29
+ * to the known pattern. If a register was altered it indicates an SMCCC
+ * violation and the function returns with a value greater than 0.
+ * The function returns 0 on success.
+ */
+func test_ffa_smc
+ /* Save FP/LR. */
+ stp x29, x30, [sp, #-16]!
+
+ /* Save x19-x28 per AAPCS64. */
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+
+ /*
+ * Store the struct ffa_value pointer to x30.
+ * x30 is preserved by the SMC callee.
+ */
+ mov x30, x0
+
+ /* Load the SMC service input values. */
+ ldp x0, x1, [x30]
+ ldp x2, x3, [x30, #16]
+ ldp x4, x5, [x30, #32]
+ ldp x6, x7, [x30, #48]
+ mov x8, #0xa8
+ add x9, x8, #1
+ add x10, x8, #2
+ add x11, x8, #3
+ add x12, x8, #4
+ add x13, x8, #5
+ add x14, x8, #6
+ add x15, x8, #7
+ add x16, x8, #8
+ add x17, x8, #9
+ add x18, x8, #10
+ add x19, x8, #11
+ add x20, x8, #12
+ add x21, x8, #13
+ add x22, x8, #14
+ add x23, x8, #15
+ add x24, x8, #16
+ add x25, x8, #17
+ add x26, x8, #18
+ add x27, x8, #19
+ add x28, x8, #20
+ add x29, x8, #21
+ smc #0
+
+ /* Store the SMC service return values. */
+ stp x0, x1, [x30]
+ stp x2, x3, [x30, #16]
+ stp x4, x5, [x30, #32]
+ stp x6, x7, [x30, #48]
+
+ /* Check if SMC callee-preserved registers were altered. */
+ mov w0, wzr
+ cmp x8, #0xa8
+ cinc x0, x0, ne
+ cmp x9, #0xa9
+ cinc x0, x0, ne
+ cmp x10, #0xaa
+ cinc x0, x0, ne
+ cmp x11, #0xab
+ cinc x0, x0, ne
+ cmp x12, #0xac
+ cinc x0, x0, ne
+ cmp x13, #0xad
+ cinc x0, x0, ne
+ cmp x14, #0xae
+ cinc x0, x0, ne
+ cmp x15, #0xaf
+ cinc x0, x0, ne
+ cmp x16, #0xb0
+ cinc x0, x0, ne
+ cmp x17, #0xb1
+ cinc x0, x0, ne
+ cmp x18, #0xb2
+ cinc x0, x0, ne
+ cmp x19, #0xb3
+ cinc x0, x0, ne
+ cmp x20, #0xb4
+ cinc x0, x0, ne
+ cmp x21, #0xb5
+ cinc x0, x0, ne
+ cmp x22, #0xb6
+ cinc x0, x0, ne
+ cmp x23, #0xb7
+ cinc x0, x0, ne
+ cmp x24, #0xb8
+ cinc x0, x0, ne
+ cmp x25, #0xb9
+ cinc x0, x0, ne
+ cmp x26, #0xba
+ cinc x0, x0, ne
+ cmp x27, #0xbb
+ cinc x0, x0, ne
+ cmp x28, #0xbc
+ cinc x0, x0, ne
+ cmp x29, #0xbd
+ cinc x0, x0, ne
+
+ /* Restore x19-x28 per AAPCS64. */
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+
+ /* Restore FP/LR. */
+ ldp x29, x30, [sp], #16
+ ret
+endfunc test_ffa_smc
+
+/**
+ * test_ffa_smc_ext
+ *
+ * x0 - ptr to a struct ffa_value
+ *
+ * This function is used to test FF-A ABIs on top of SMCCCv1.2 for interfaces
+ * requiring at most 18 input/output registers.
+ * Load 18 GP input registers, move a pattern into x18-x29 and emit an SMC.
+ * On return save 18 output registers to the structure and compare x18-x29
+ * to the known pattern. If a register was altered it indicates an SMCCC
+ * violation and the function returns with a value greater than 0.
+ * The function returns 0 on success.
+ */
+func test_ffa_smc_ext
+ /* Save FP/LR. */
+ stp x29, x30, [sp, #-16]!
+
+ /* Save x19-x28 per AAPCS64. */
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+
+ /*
+ * Store the struct ffa_value pointer to x30.
+ * x30 is preserved by the SMC callee.
+ */
+ mov x30, x0
+
+ /* Load the SMC service input values. */
+ ldp x0, x1, [x30]
+ ldp x2, x3, [x30, #16]
+ ldp x4, x5, [x30, #32]
+ ldp x6, x7, [x30, #48]
+ ldp x8, x9, [x30, #64]
+ ldp x10, x11, [x30, #80]
+ ldp x12, x13, [x30, #96]
+ ldp x14, x15, [x30, #112]
+ ldp x16, x17, [x30, #128]
+ mov x18, #0xb2
+ add x19, x18, #1
+ add x20, x18, #2
+ add x21, x18, #3
+ add x22, x18, #4
+ add x23, x18, #5
+ add x24, x18, #6
+ add x25, x18, #7
+ add x26, x18, #8
+ add x27, x18, #9
+ add x28, x18, #10
+ add x29, x18, #11
+ smc #0
+
+ /* Store the SMC service return values. */
+ stp x0, x1, [x30]
+ stp x2, x3, [x30, #16]
+ stp x4, x5, [x30, #32]
+ stp x6, x7, [x30, #48]
+ stp x8, x9, [x30, #64]
+ stp x10, x11, [x30, #80]
+ stp x12, x13, [x30, #96]
+ stp x14, x15, [x30, #112]
+ stp x16, x17, [x30, #128]
+
+ /* Check if SMC callee-preserved registers were altered. */
+ mov w0, wzr
+ cmp x18, #0xb2
+ cinc x0, x0, ne
+ cmp x19, #0xb3
+ cinc x0, x0, ne
+ cmp x20, #0xb4
+ cinc x0, x0, ne
+ cmp x21, #0xb5
+ cinc x0, x0, ne
+ cmp x22, #0xb6
+ cinc x0, x0, ne
+ cmp x23, #0xb7
+ cinc x0, x0, ne
+ cmp x24, #0xb8
+ cinc x0, x0, ne
+ cmp x25, #0xb9
+ cinc x0, x0, ne
+ cmp x26, #0xba
+ cinc x0, x0, ne
+ cmp x27, #0xbb
+ cinc x0, x0, ne
+ cmp x28, #0xbc
+ cinc x0, x0, ne
+ cmp x29, #0xbd
+ cinc x0, x0, ne
+
+ /* Restore x19-x28 per AAPCS64. */
+ ldp x19, x20, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x27, x28, [sp], #16
+
+ /* Restore FP/LR. */
+ ldp x29, x30, [sp], #16
+ ret
+endfunc test_ffa_smc_ext
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_version.c b/tftf/tests/runtime_services/secure_service/test_ffa_version.c
deleted file mode 100644
index 41eca5adc..000000000
--- a/tftf/tests/runtime_services/secure_service/test_ffa_version.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2020-2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <ffa_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-/*
- * Using FFA version expected for SPM.
- */
-#define SPM_VERSION MAKE_FFA_VERSION(FFA_VERSION_MAJOR, FFA_VERSION_MINOR)
-
-static bool should_skip_test;
-
-/*
- * Calls FFA Version ABI, and checks if the result as expected.
- */
-static test_result_t test_ffa_version(uint32_t input_version, uint32_t expected_return)
-{
- if (should_skip_test) {
- return TEST_RESULT_SKIPPED;
- }
-
- smc_ret_values ret_values = ffa_version(input_version);
-
- uint32_t spm_version = (uint32_t)(0xFFFFFFFF & ret_values.ret0);
-
- if (spm_version == expected_return) {
- return TEST_RESULT_SUCCESS;
- }
-
- tftf_testcase_printf("Input Version: 0x%x\nReturn: 0x%x\nExpected: 0x%x\n",
- input_version, spm_version, expected_return);
-
- return TEST_RESULT_FAIL;
-}
-
-/*
- * @Test_Aim@ Validate what happens when using same version as SPM.
- */
-test_result_t test_ffa_version_equal(void)
-{
- /*
- * FFA_VERSION interface is used to check that SPM functionality is supported.
- * On FFA_VERSION invocation from TFTF, the SPMD returns either NOT_SUPPORTED or
- * the SPMC version value provided in the SPMC manifest. The variable "should_skip_test"
- * is set to true when the SPMD returns NOT_SUPPORTED or a mismatched version, which
- * means that a TFTF physical FF-A endpoint version (SPM_VERSION) does not match the
- * SPMC's physical FF-A endpoint version. This prevents running the subsequent FF-A
- * version tests (and break the test flow), as they're not relevant when the SPMD is
- * not present within BL31 (FFA_VERSION returns NOT_SUPPORTED).
- */
- test_result_t ret = test_ffa_version(SPM_VERSION, SPM_VERSION);
- if (ret != TEST_RESULT_SUCCESS) {
- should_skip_test = true;
- ret = TEST_RESULT_SKIPPED;
- }
- return ret;
-}
-
-/*
- * @Test_Aim@ Validate what happens when setting bit 31 in
- * 'input_version'. As per spec, FFA version is 31 bits long.
- * Bit 31 set is an invalid input.
- */
-test_result_t test_ffa_version_bit31(void)
-{
- return test_ffa_version(FFA_VERSION_BIT31_MASK | SPM_VERSION, FFA_ERROR_NOT_SUPPORTED);
-}
-
-/*
- * @Test_Aim@ Validate what happens for bigger version than SPM's.
- */
-test_result_t test_ffa_version_bigger(void)
-{
- return test_ffa_version(MAKE_FFA_VERSION(FFA_VERSION_MAJOR + 1, 0), SPM_VERSION);
-}
-
-/*
- * @Test_Aim@ Validate what happens for smaller version than SPM's.
- */
-test_result_t test_ffa_version_smaller(void)
-{
- return test_ffa_version(MAKE_FFA_VERSION(0, 9), SPM_VERSION);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_quark_request.c b/tftf/tests/runtime_services/secure_service/test_quark_request.c
deleted file mode 100644
index 0f9556fe2..000000000
--- a/tftf/tests/runtime_services/secure_service/test_quark_request.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <quark_def.h>
-#include <spci_helpers.h>
-#include <test_helpers.h>
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs a simple
- * request to Quark to verify that its memory map is correct and that it is
- * working as expected.
- */
-test_result_t test_quark_request(void)
-{
- int ret;
- uint16_t handle_quark;
- u_register_t rx1, rx2, rx3;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_quark,
- QUARK_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Send request to Quark */
-
- ret = spci_service_request_blocking(QUARK_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_quark,
- &rx1, &rx2, &rx3);
-
- if (ret == SPCI_SUCCESS) {
- if (rx1 != QUARK_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Quark returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_quark);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c b/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c
deleted file mode 100644
index 6d248428e..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_blocking_request.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <ivy_def.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <power_management.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-#define TEST_NUM_ITERATIONS 1000U
-
-static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
-
-static test_result_t test_spci_blocking_multicore_fn(void)
-{
- int ret;
- uint16_t handle_cactus, handle_ivy;
- u_register_t rx1, rx2, rx3;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_send_event(&cpu_has_entered_test[core_pos]);
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_none;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_ivy,
- IVY_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_cactus;
- }
-
- /* Request services. */
-
- for (unsigned int i = 0U; i < TEST_NUM_ITERATIONS; i++) {
-
- /* Send request to Cactus */
-
- ret = spci_service_request_blocking(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &rx1, &rx2, &rx3);
-
- if (ret == SPCI_BUSY) {
- /*
- * Another CPU is already using Cactus, this is not a
- * failure.
- */
- } else if (ret == SPCI_SUCCESS) {
- if (rx1 != CACTUS_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Cactus returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- break;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- break;
- }
-
- /* Send request to Ivy */
-
- ret = spci_service_request_blocking(IVY_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- &rx1, &rx2, &rx3);
- if (ret == SPCI_BUSY) {
- /*
- * Another CPU is already using Ivy, this is not a
- * failure.
- */
- } else if (ret == SPCI_SUCCESS) {
- if (rx1 != IVY_MAGIC_NUMBER) {
- tftf_testcase_printf("%d: Ivy returned 0x%x 0x%lx 0x%lx 0x%lx\n",
- __LINE__, (uint32_t)ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- break;
- }
- } else {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- break;
- }
-
- }
-
- /* Close handles. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
-exit_cactus:
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished. */
-exit_none:
- return result;
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * blocking requests to Cactus and Ivy from multiple cores
- */
-test_result_t test_spci_blocking_request_multicore(void)
-{
- unsigned int cpu_node, core_pos;
- int psci_ret;
- u_register_t cpu_mpid;
- u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_has_entered_test[i]);
- }
-
- /* Power on all CPUs */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU as it is already powered on */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- psci_ret = tftf_cpu_on(cpu_mpid,
- (uintptr_t)test_spci_blocking_multicore_fn, 0);
- if (psci_ret != PSCI_E_SUCCESS) {
- core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_testcase_printf(
- "Failed to power on CPU %d (rc = %d)\n",
- core_pos, psci_ret);
- return TEST_RESULT_FAIL;
- }
- }
-
- /* Wait until all CPUs have started the test. */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
- tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
- }
-
- /* Enter the test on lead CPU and return the result. */
- return test_spci_blocking_multicore_fn();
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c b/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c
deleted file mode 100644
index dffd910f8..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_blocking_while_busy.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <string.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-#include <timer.h>
-
-static volatile int timer_irq_received;
-
-/*
- * ISR for the timer interrupt. Update a global variable to check it has been
- * called.
- */
-static int timer_handler(void *data)
-{
- assert(timer_irq_received == 0);
- timer_irq_received = 1;
- return 0;
-}
-
-/*
- * @Test_Aim@ Test that blocking requests can only be done when there are no
- * active non-blocking requests in a partition.
- *
- * 1. Register a handler for the non-secure timer interrupt. Program it to fire
- * in a certain time.
- *
- * 2. Send a non-blocking request to Cactus to sleep for more time than the
- * timer.
- *
- * 3. While servicing the timer sleep request, the non-secure timer should
- * fire and interrupt Cactus.
- *
- * 5. Check that the interrupt has been handled.
- *
- * 6. Make sure that the response isn't ready yet.
- *
- * 7. Try to send a blocking request. It should be denied because the partition
- * is busy.
- *
- * 8. Return to Cactus to finish the request.
- */
-test_result_t test_spci_blocking_while_busy(void)
-{
- int ret;
- u_register_t rx1, rx2, rx3;
- uint16_t handle_cactus;
- uint32_t token_cactus;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handle. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Program timer */
-
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
-
- ret = tftf_program_timer(100);
- if (ret < 0) {
- tftf_testcase_printf("Failed to program timer (%d)\n", ret);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- enable_irq();
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_SLEEP_MS, 200U,
- 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Check that the interrupt has been handled. */
-
- if (timer_irq_received == 0) {
- tftf_testcase_printf("%d: Didn't handle interrupt\n", __LINE__);
- result = TEST_RESULT_FAIL;
- }
-
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
-
- /* Make sure that the response is not ready yet. */
-
- ret = spci_service_get_response(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- NULL, NULL, NULL);
-
- if (ret == SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned SPCI_SUCCESS\n",
- __LINE__);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /*
- * Try to send a blocking request. It should be denied because the
- * partition is busy.
- */
-
- ret = spci_service_request_blocking(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &rx1, &rx2, &rx3);
- if (ret != SPCI_BUSY) {
- tftf_testcase_printf("%d: Cactus should have returned SPCI_BUSY. Returned %d 0x%lx 0x%lx 0x%lx\n",
- __LINE__, ret, rx1, rx2, rx3);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /* Re-enter Cactus to finish the request */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &rx1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle. */
-exit_close_handle:
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished. */
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c b/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c
deleted file mode 100644
index 522465ee1..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_handle_open.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-/*
- * @Test_Aim@ This tests that we can get the handle of a Secure Service and
- * close it correctly.
- */
-test_result_t test_spci_handle_open(void)
-{
- int ret;
- uint16_t handle1, handle2;
-
- /**********************************************************************
- * Verify that SPCI is there and that it has the correct version.
- **********************************************************************/
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /**********************************************************************
- * Try to get handle of an invalid Secure Service.
- **********************************************************************/
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle1,
- CACTUS_INVALID_UUID);
-
- if (ret != SPCI_NOT_PRESENT) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_NOT_PRESENT. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Get handle of valid Secure Services.
- **********************************************************************/
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle1,
- CACTUS_SERVICE1_UUID);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle2,
- CACTUS_SERVICE2_UUID);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Close invalid handle.
- **********************************************************************/
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, ~handle1);
-
- if (ret != SPCI_INVALID_PARAMETER) {
- tftf_testcase_printf("%d: SPM didn't fail to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * Close valid handles.
- **********************************************************************/
-
- /* Close in the reverse order to test that it can be done. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle2);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle1);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /**********************************************************************
- * All tests passed.
- **********************************************************************/
-
- return TEST_RESULT_SUCCESS;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c b/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c
deleted file mode 100644
index cb5dd6b0c..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_interrupt.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <assert.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <smccc.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <string.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-#include <timer.h>
-
-static volatile int timer_irq_received;
-
-/*
- * ISR for the timer interrupt. Update a global variable to check it has been
- * called.
- */
-static int timer_handler(void *data)
-{
- assert(timer_irq_received == 0);
- timer_irq_received = 1;
- return 0;
-}
-
-/*
- * @Test_Aim@ Test that non-secure interrupts interrupt non-blocking requests.
- *
- * 1. Register a handler for the non-secure timer interrupt. Program it to fire
- * in a certain time.
- *
- * 2. Send a non-blocking request to Cactus to sleep for more time than the
- * timer.
- *
- * 3. While servicing the timer sleep request, the non-secure timer should
- * fire and interrupt Cactus.
- *
- * 4. Make sure that the response isn't ready yet.
- *
- * 5. In the TFTF, check that the interrupt has been handled.
- *
- * 6. Return to Cactus to finish the request.
- */
-test_result_t test_spci_non_blocking_interrupt_by_ns(void)
-{
- int ret;
- u_register_t ret1;
- uint16_t handle_cactus;
- uint32_t token_cactus;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- /* Open handle */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: %d\n",
- __LINE__, ret);
- return TEST_RESULT_FAIL;
- }
-
- /* Program timer */
-
- timer_irq_received = 0;
- tftf_timer_register_handler(timer_handler);
-
- ret = tftf_program_timer(100);
- if (ret < 0) {
- tftf_testcase_printf("Failed to program timer (%d)\n", ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_SLEEP_MS, 200U,
- 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Check that the interrupt has been handled */
-
- tftf_cancel_timer();
- tftf_timer_unregister_handler();
-
- if (timer_irq_received == 0) {
- tftf_testcase_printf("%d: Didn't handle interrupt\n", __LINE__);
- result = TEST_RESULT_FAIL;
- }
-
- /* Make sure that the response is not ready yet */
-
- ret = spci_service_get_response(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- NULL, NULL, NULL);
-
- if (ret == SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned SPCI_SUCCESS\n",
- __LINE__);
- result = TEST_RESULT_FAIL;
- goto exit_close_handle;
- }
-
- /* Re-enter Cactus to finish the request */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: Cactus returned 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* Close handle */
-exit_close_handle:
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: %d\n",
- __LINE__, ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests finished */
-
- return result;
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c b/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c
deleted file mode 100644
index 8d3506b55..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spci_non_blocking_request.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2018, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <arch_helpers.h>
-#include <cactus_def.h>
-#include <debug.h>
-#include <events.h>
-#include <ivy_def.h>
-#include <plat_topology.h>
-#include <platform.h>
-#include <power_management.h>
-#include <smccc.h>
-#include <stdbool.h>
-#include <spci_helpers.h>
-#include <spci_svc.h>
-#include <test_helpers.h>
-#include <tftf_lib.h>
-
-#define TEST_NUM_ITERATIONS 1000U
-
-test_result_t test_spci_non_blocking_fn(void)
-{
- int ret;
- u_register_t ret1;
- uint16_t handle_cactus, handle_ivy;
- uint32_t token_cactus, token_ivy;
- test_result_t result = TEST_RESULT_SUCCESS;
-
- /* Open handles. */
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_cactus,
- CACTUS_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_none;
- }
-
- ret = spci_service_handle_open(TFTF_SPCI_CLIENT_ID, &handle_ivy,
- IVY_SERVICE1_UUID);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to return a valid handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- goto exit_cactus;
- }
-
- /* Request services. */
-
- for (unsigned int i = 0U; i < TEST_NUM_ITERATIONS; i++) {
-
- bool exit_loop = false;
-
- /* Send request to Cactus */
-
- ret = spci_service_request_start(CACTUS_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- &token_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Send request to Ivy */
-
- ret = spci_service_request_start(IVY_GET_MAGIC,
- 0, 0, 0, 0, 0,
- TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- &token_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM should have returned SPCI_SUCCESS. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Get response from Ivy */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_ivy,
- token_ivy,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if ((ret != SPCI_SUCCESS) || (ret1 != IVY_MAGIC_NUMBER)) {
- tftf_testcase_printf("%d: Ivy returned 0x%x 0x%lx\n",
- __LINE__, (uint32_t)ret, ret1);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* Get response from Cactus */
-
- do {
- ret = spci_service_request_resume(TFTF_SPCI_CLIENT_ID,
- handle_cactus,
- token_cactus,
- &ret1, NULL, NULL);
- } while (ret == SPCI_QUEUED);
-
- if ((ret != SPCI_SUCCESS) || (ret1 != CACTUS_MAGIC_NUMBER)) {
- tftf_testcase_printf("%d: Cactus returned 0x%x 0x%lx\n",
- __LINE__, (uint32_t)ret, ret1);
- result = TEST_RESULT_FAIL;
- exit_loop = true;
- }
-
- /* If there has been an error, exit early. */
- if (exit_loop) {
- break;
- }
- }
-
- /* Close handles. */
-
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_ivy);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
-exit_cactus:
- ret = spci_service_handle_close(TFTF_SPCI_CLIENT_ID, handle_cactus);
- if (ret != SPCI_SUCCESS) {
- tftf_testcase_printf("%d: SPM failed to close the handle. Returned: 0x%x\n",
- __LINE__, (uint32_t)ret);
- result = TEST_RESULT_FAIL;
- }
-
- /* All tests passed. */
-exit_none:
- return result;
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * non-blocking requests to Cactus and Ivy.
- */
-test_result_t test_spci_request(void)
-{
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- return test_spci_non_blocking_fn();
-}
-
-/******************************************************************************/
-
-static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
-
-static test_result_t test_spci_non_blocking_multicore_fn(void)
-{
- u_register_t cpu_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int core_pos = platform_get_core_pos(cpu_mpid);
-
- tftf_send_event(&cpu_has_entered_test[core_pos]);
-
- return test_spci_non_blocking_fn();
-}
-
-/*
- * @Test_Aim@ This tests opens a Secure Service handle and performs many simple
- * non-blocking requests to Cactus and Ivy from multiple cores
- */
-test_result_t test_spci_request_multicore(void)
-{
- unsigned int cpu_node, core_pos;
- int psci_ret;
- u_register_t cpu_mpid;
- u_register_t lead_mpid = read_mpidr_el1() & MPID_MASK;
-
- SKIP_TEST_IF_SPCI_VERSION_LESS_THAN(0, 1);
-
- for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
- tftf_init_event(&cpu_has_entered_test[i]);
- }
-
- /* Power on all CPUs */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU as it is already powered on */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
-
- psci_ret = tftf_cpu_on(cpu_mpid,
- (uintptr_t)test_spci_non_blocking_multicore_fn, 0);
- if (psci_ret != PSCI_E_SUCCESS) {
- tftf_testcase_printf(
- "Failed to power on CPU %d (rc = %d)\n",
- core_pos, psci_ret);
- return TEST_RESULT_FAIL;
- }
- }
-
- /* Wait until all CPUs have started the test. */
- for_each_cpu(cpu_node) {
- cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
- /* Skip lead CPU */
- if (cpu_mpid == lead_mpid) {
- continue;
- }
-
- core_pos = platform_get_core_pos(cpu_mpid);
- tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
- }
-
- /* Enter the test on lead CPU and return the result. */
- return test_spci_non_blocking_fn();
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c b/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c
deleted file mode 100644
index f57fa243b..000000000
--- a/tftf/tests/runtime_services/secure_service/test_spm_cpu_features.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <cactus_test_cmds.h>
-#include <ffa_endpoints.h>
-#include <ffa_helpers.h>
-#include <test_helpers.h>
-
-#define SENDER HYP_ID
-#define RECEIVER SP_ID(1)
-
-static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
-
-static test_result_t simd_vector_compare(simd_vector_t a[SIMD_NUM_VECTORS],
- simd_vector_t b[SIMD_NUM_VECTORS])
-{
- for (unsigned int num = 0U; num < SIMD_NUM_VECTORS; num++) {
- if (memcmp(a[num], b[num], sizeof(simd_vector_t)) != 0) {
- ERROR("Vectors not equal: a:0x%llx b:0x%llx\n",
- (uint64_t)a[num][0], (uint64_t)b[num][0]);
- return TEST_RESULT_FAIL;
- }
- }
- return TEST_RESULT_SUCCESS;
-}
-
-/*
- * Tests that SIMD vectors are preserved during the context switches between
- * normal world and the secure world.
- * Fills the SIMD vectors with known values, requests SP to fill the vectors
- * with a different values, checks that the context is restored on return.
- */
-test_result_t test_simd_vectors_preserved(void)
-{
- SKIP_TEST_IF_AARCH32();
-
- /**********************************************************************
- * Verify that FFA is there and that it has the correct version.
- **********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
-
- simd_vector_t simd_vectors_send[SIMD_NUM_VECTORS],
- simd_vectors_receive[SIMD_NUM_VECTORS];
-
- /* 0x11 is just a dummy value to be distinguished from the value in the
- * secure world. */
- for (unsigned int num = 0U; num < SIMD_NUM_VECTORS; num++) {
- memset(simd_vectors_send[num], 0x11 * num, sizeof(simd_vector_t));
- }
-
- fill_simd_vector_regs(simd_vectors_send);
-
- smc_ret_values ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
-
- if (!is_ffa_direct_response(ret)) {
- return TEST_RESULT_FAIL;
- }
-
- if (cactus_get_response(ret) == CACTUS_ERROR) {
- return TEST_RESULT_FAIL;
- }
-
- read_simd_vector_regs(simd_vectors_receive);
-
- return simd_vector_compare(simd_vectors_send, simd_vectors_receive);
-}
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_simd.c b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
new file mode 100644
index 000000000..cfc931f54
--- /dev/null
+++ b/tftf/tests/runtime_services/secure_service/test_spm_simd.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <cactus_test_cmds.h>
+#include <ffa_endpoints.h>
+#include <ffa_helpers.h>
+#include <fpu.h>
+#include <spm_test_helpers.h>
+#include <test_helpers.h>
+#include <lib/extensions/sve.h>
+
+#define SENDER HYP_ID
+#define RECEIVER SP_ID(1)
+#define SVE_TEST_ITERATIONS 100
+#define NS_SVE_OP_ARRAYSIZE 1024
+
+static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+
+static sve_z_regs_t sve_vectors_input;
+static sve_z_regs_t sve_vectors_output;
+static int sve_op_1[NS_SVE_OP_ARRAYSIZE];
+static int sve_op_2[NS_SVE_OP_ARRAYSIZE];
+static fpu_state_t g_fpu_state_write;
+static fpu_state_t g_fpu_state_read;
+
+/*
+ * Tests that SIMD vectors and FPU state are preserved across context switches between
+ * the normal world and the secure world.
+ * Fills the SIMD vectors, FPCR and FPSR with random values, requests the SP to fill the
+ * vectors with different values, then requests the SP to check that the secure SIMD
+ * context is restored. Checks that the NS context is restored on return.
+ */
+test_result_t test_simd_vectors_preserved(void)
+{
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ fpu_state_write_rand(&g_fpu_state_write);
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = cactus_req_simd_compare_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+	/* Normal world verifies its FPU/SIMD state register data. */
+ fpu_state_read(&g_fpu_state_read);
+ if (fpu_state_compare(&g_fpu_state_write, &g_fpu_state_read) != 0) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Tests that SVE vectors are preserved across context switches between
+ * the normal world and the secure world.
+ * Fills the SVE vectors with known values, requests the SP to fill the vectors
+ * with different values, and checks that the context is restored on return.
+ */
+test_result_t test_sve_vectors_preserved(void)
+{
+ uint64_t vl;
+ uint8_t *sve_vector;
+
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ /*
+	 * Clear the SVE vector buffers used to compare the SVE state before
+	 * calling into the SWd with the SVE state restored after returning to the NWd.
+ */
+ memset(sve_vectors_input, 0, sizeof(sve_vectors_input));
+ memset(sve_vectors_output, 0, sizeof(sve_vectors_output));
+
+ /* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
+ write_zcr_el2(0xf);
+ isb();
+
+ /* Get the implemented VL. */
+ vl = sve_rdvl_1();
+
+ /* Fill each vector for the VL size with a fixed pattern. */
+ sve_vector = (uint8_t *) sve_vectors_input;
+ for (uint32_t vector_num = 0U; vector_num < SVE_NUM_VECTORS; vector_num++) {
+ memset(sve_vector, 0x11 * (vector_num + 1), vl);
+ sve_vector += vl;
+ }
+
+ /* Fill SVE vector registers with the buffer contents prepared above. */
+ sve_z_regs_write(&sve_vectors_input);
+
+ /*
+	 * Call the Cactus secure partition, which uses SIMD (and expect that it
+	 * doesn't affect the normal world state on return).
+ */
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return TEST_RESULT_FAIL;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Get the SVE vectors state after returning to normal world. */
+ sve_z_regs_read(&sve_vectors_output);
+
+ /* Compare to state before calling into secure world. */
+ if (sve_z_regs_compare(&sve_vectors_input, &sve_vectors_output) != 0UL) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Sends the SIMD fill command to the Cactus SP.
+ * Returns:
+ * false - On success
+ * true - On failure
+ */
+#ifdef __aarch64__
+static bool callback_enter_cactus_sp(void)
+{
+ struct ffa_value ret = cactus_req_simd_fill_send_cmd(SENDER, RECEIVER);
+
+ if (!is_ffa_direct_response(ret)) {
+ return true;
+ }
+
+ if (cactus_get_response(ret) == CACTUS_ERROR) {
+ return true;
+ }
+
+ return false;
+}
+#endif /* __aarch64__ */
+
+/*
+ * Tests that SVE vector operations in normal world are not affected by context
+ * switches between normal world and the secure world.
+ */
+test_result_t test_sve_vectors_operations(void)
+{
+ unsigned int val;
+ bool cb_err;
+
+ SKIP_TEST_IF_SVE_NOT_SUPPORTED();
+
+ /**********************************************************************
+ * Verify that FF-A is there and that it has the correct version.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 1, expected_sp_uuids);
+
+ val = 2 * SVE_TEST_ITERATIONS;
+
+ for (unsigned int i = 0; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ sve_op_1[i] = val;
+ sve_op_2[i] = 1;
+ }
+
+ /* Set ZCR_EL2.LEN to implemented VL (constrained by EL3). */
+ write_zcr_el2(0xf);
+ isb();
+
+ for (unsigned int i = 0; i < SVE_TEST_ITERATIONS; i++) {
+		/* Perform SVE operations with intermittent calls to the SWd. */
+ cb_err = sve_subtract_arrays_interleaved(sve_op_1, sve_op_1,
+ sve_op_2,
+ NS_SVE_OP_ARRAYSIZE,
+ &callback_enter_cactus_sp);
+ if (cb_err == true) {
+ ERROR("Callback to Cactus SP failed\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ }
+
+ /* Check result of SVE operations. */
+ for (unsigned int i = 0; i < NS_SVE_OP_ARRAYSIZE; i++) {
+ if (sve_op_1[i] != (val - SVE_TEST_ITERATIONS)) {
+ return TEST_RESULT_FAIL;
+ }
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
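To illustrate the interleaving pattern relied on by test_sve_vectors_operations(), a plain-C equivalent of the helper (ignoring the actual SVE instructions) might look like the sketch below; the chunk size and loop shape are assumptions, not the library's implementation of sve_subtract_arrays_interleaved().

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical number of elements processed between world switches. */
#define INTERLEAVE_CHUNK	128U

typedef bool (*world_switch_cb_t)(void);

/* Element-wise subtraction with a periodic callback so that a secure-world
 * context switch happens in the middle of the vector computation. */
static bool subtract_arrays_interleaved(int *dst, const int *src_a,
					const int *src_b, size_t len,
					world_switch_cb_t cb)
{
	for (size_t i = 0U; i < len; i++) {
		dst[i] = src_a[i] - src_b[i];

		/* Enter the secure world every INTERLEAVE_CHUNK elements. */
		if ((((i + 1U) % INTERLEAVE_CHUNK) == 0U) && cb()) {
			return true;	/* the callback reported an error */
		}
	}

	return false;
}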
diff --git a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
index b041a976c..6237eb80b 100644
--- a/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
+++ b/tftf/tests/runtime_services/secure_service/test_spm_smmu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -7,32 +7,173 @@
#include <cactus_test_cmds.h>
#include <debug.h>
#include <ffa_endpoints.h>
+#include <runtime_services/host_realm_managment/host_realm_rmi.h>
#include <smccc.h>
+#include <spm_test_helpers.h>
#include <test_helpers.h>
+#if PLAT_fvp || PLAT_tc
+#include <sp_platform_def.h>
static const struct ffa_uuid expected_sp_uuids[] = { {PRIMARY_UUID} };
+#endif
+
+#define TEST_DMA_ENGINE_MEMCPY (2U)
+#define TEST_DMA_ENGINE_RAND48 (3U)
+
+/*
+ * Attribute encoding for Inner and Outer cacheability:
+ * Read-Allocate, Write-Allocate, Write-Back Normal memory.
+ */
+#define ATTR_ACACHE_RAWAWB_S (0xffU)
+#define ATTR_ACACHE_RAWAWB_NS (0x2ffU)
+
+/* Source attributes occupy the bottom halfword */
+#define DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_S ATTR_ACACHE_RAWAWB_S
+#define DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_NS ATTR_ACACHE_RAWAWB_NS
+
+/* Destination attributes occupy the top halfword */
+#define DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S (ATTR_ACACHE_RAWAWB_S << 16)
+#define DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS (ATTR_ACACHE_RAWAWB_NS << 16)
/**************************************************************************
- * Send a command to SP1 initiate DMA service with the help of a peripheral
- * device upstream of an SMMUv3 IP
+ * test_smmu_spm
+ *
+ * Send commands to SP1 to initiate DMA services with the help of a peripheral
+ * device upstream of an SMMUv3 IP.
+ * The scenario involves randomizing a secure buffer (first DMA operation),
+ * copying this buffer to another location (second DMA operation),
+ * and checking (by CPU) that both buffer contents match.
**************************************************************************/
test_result_t test_smmu_spm(void)
{
- smc_ret_values ret;
+#if PLAT_fvp || PLAT_tc
+ struct ffa_value ret;
/**********************************************************************
* Check SPMC has ffa_version and expected FFA endpoints are deployed.
**********************************************************************/
- CHECK_SPMC_TESTING_SETUP(1, 0, expected_sp_uuids);
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
- VERBOSE("Sending command to SP %x for initiating DMA transfer\n",
+ VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
SP_ID(1));
- ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1));
+ /*
+ * Randomize first half of a secure buffer from the secure world
+ * through the SMMU test engine DMA.
+ * Destination memory attributes are secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_RAND48,
+ PLAT_CACTUS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE / 2,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S);
+
+ /* Expect the SMMU DMA operation to pass. */
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+	 * Copy the first half to the second half of the buffer and
+	 * check that both halves match.
+ * Source and destination memory attributes are secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_MEMCPY,
+ PLAT_CACTUS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_S |
+ DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_S);
+
+ /* Expect the SMMU DMA operation to pass. */
+ if (cactus_get_response(ret) != CACTUS_SUCCESS) {
+ return TEST_RESULT_FAIL;
+ }
+
+ /*
+	 * Copy the first half to the second half of the non-secure buffer and
+	 * check that both halves match.
+	 * Source and destination memory attributes are non-secure rawaWB.
+	 * This test helps validate a scenario where a secure stream
+	 * belonging to the Cactus SP accesses non-secure IPA space.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_MEMCPY,
+ PLAT_CACTUS_NS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS |
+ DMA_ENGINE_ATTR_SRC_ACACHE_RAWAWB_NS);
+
+ /* Expect the SMMU DMA operation to pass. */
if (cactus_get_response(ret) != CACTUS_SUCCESS) {
return TEST_RESULT_FAIL;
}
return TEST_RESULT_SUCCESS;
+#else
+ return TEST_RESULT_SKIPPED;
+#endif
}
+/**************************************************************************
+ * test_smmu_spm_invalid_access
+ *
+ * The scenario changes an NS buffer's PAS to Realm PAS. It then requests an SP
+ * to initiate a secure DMA operation on this buffer through the SMMU.
+ * The operation is expected to fail as a secure DMA transaction to a Realm
+ * region fails SMMU GPC checks.
+ **************************************************************************/
+test_result_t test_smmu_spm_invalid_access(void)
+{
+#if PLAT_fvp || PLAT_tc
+ struct ffa_value ret;
+ u_register_t retmm;
+
+ /* Skip this test if RME is not implemented. */
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ /**********************************************************************
+ * Check SPMC has ffa_version and expected FFA endpoints are deployed.
+ **********************************************************************/
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ /* Update the NS buffer to Realm PAS. */
+ retmm = host_rmi_granule_delegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+ if (retmm != 0UL) {
+ ERROR("Granule delegate failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ VERBOSE("Sending command to SP %x for initiating DMA transfer.\n",
+ SP_ID(1));
+
+ /*
+ * Attempt randomizing the buffer (now turned into Realm PAS)
+ * from the secure world through the SMMU test engine DMA.
+ * Destination memory attributes are non-secure rawaWB.
+ */
+ ret = cactus_send_dma_cmd(HYP_ID, SP_ID(1),
+ TEST_DMA_ENGINE_RAND48,
+ PLAT_CACTUS_NS_MEMCPY_BASE,
+ PLAT_CACTUS_MEMCPY_RANGE,
+ DMA_ENGINE_ATTR_DEST_ACACHE_RAWAWB_NS);
+
+ /* Update the buffer back to NS PAS. */
+ retmm = host_rmi_granule_undelegate((u_register_t)PLAT_CACTUS_NS_MEMCPY_BASE);
+ if (retmm != 0UL) {
+ ERROR("Granule undelegate failed!\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ /* Expect the SMMU DMA operation to have failed. */
+ if (cactus_get_response(ret) != CACTUS_ERROR) {
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+#else
+ return TEST_RESULT_SKIPPED;
+#endif
+}
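To spell out the attribute packing used by the DMA commands above: the 32-bit attribute argument carries the source attributes in its bottom halfword and the destination attributes in its top halfword. A minimal sketch, reusing the values defined in this file:

#include <stdint.h>

/* Values mirrored from the macros defined earlier in this file. */
#define ATTR_ACACHE_RAWAWB_S	0xffU
#define ATTR_ACACHE_RAWAWB_NS	0x2ffU

/* Pack DMA attributes: source in bits [15:0], destination in bits [31:16]. */
static inline uint32_t dma_pack_attrs(uint32_t src, uint32_t dest)
{
	return (src & 0xffffU) | ((dest & 0xffffU) << 16);
}

/* Example: the secure-to-secure memcpy in test_smmu_spm() uses secure
 * rawaWB attributes for both the source and the destination. */
static const uint32_t secure_memcpy_attrs =
	(ATTR_ACACHE_RAWAWB_S & 0xffffU) | (ATTR_ACACHE_RAWAWB_S << 16);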
diff --git a/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c b/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
new file mode 100644
index 000000000..0baf471d3
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2023-2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <events.h>
+#include <platform.h>
+#include <power_management.h>
+#include <psci.h>
+#include <smccc.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <platform_def.h>
+#include <string.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <errata_abi.h>
+
+static event_t cpu_has_entered_test[PLATFORM_CORE_COUNT];
+
+/* Forward flag */
+#define FORWARD_FLAG_EL1 0x00
+
+/* Extract revision and variant info */
+#define EXTRACT_REV_VAR(x) (x & MIDR_REV_MASK) | ((x >> (MIDR_VAR_SHIFT - MIDR_REV_BITS)) \
+ & MIDR_VAR_MASK)
+
+/* Extract the partnumber */
+#define EXTRACT_PARTNO(x) ((x >> MIDR_PN_SHIFT) & MIDR_PN_MASK)
+
+#define RXPX_RANGE(x, y, z) (((x >= y) && (x <= z)) ? true : false)
+
+/* Global pointer to point to individual cpu structs based on midr value */
+em_cpu_t *cpu_ptr;
+
+/*
+ * Errata list for CPUs. This list needs to be updated
+ * for every new errata added to the errata ABI list.
+ */
+em_cpu_t cortex_A15_errata_list = {
+ .cpu_pn = 0xC0F,
+ .cpu_errata = {
+ {816470, 0x30, 0xFF},
+ {827671, 0x30, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A17_errata_list = {
+ .cpu_pn = 0xC0E,
+ .cpu_errata = {
+ {852421, 0x00, 0x12},
+ {852423, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A9_errata_list = {
+ .cpu_pn = 0xC09,
+ .cpu_errata = {
+ {790473, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A35_errata_list = {
+ .cpu_pn = 0xD04,
+ .cpu_errata = {
+ {855472, 0x00, 0x00},
+ {1234567, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A53_errata_list = {
+ .cpu_pn = 0xD03,
+ .cpu_errata = {
+ {819472, 0x00, 0x01},
+ {824069, 0x00, 0x02},
+ {826319, 0x00, 0x02},
+ {827319, 0x00, 0x02},
+ {835769, 0x00, 0x04},
+ {836870, 0x00, 0x03},
+ {843419, 0x00, 0x04},
+ {855873, 0x03, 0xFF},
+ {1530924, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A55_errata_list = {
+ .cpu_pn = 0xD05,
+ .cpu_errata = {
+ {768277, 0x00, 0x00},
+ {778703, 0x00, 0x00},
+ {798797, 0x00, 0x00},
+ {846532, 0x00, 0x01},
+ {903758, 0x00, 0x01},
+ {1221012, 0x00, 0x10},
+ {1530923, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A57_errata_list = {
+ .cpu_pn = 0xD07,
+ .cpu_errata = {
+ {806969, 0x00, 0x00},
+ {813419, 0x00, 0x00},
+ {813420, 0x00, 0x00},
+ {814670, 0x00, 0x00},
+ {817169, 0x00, 0x01},
+ {826974, 0x00, 0x11},
+ {826977, 0x00, 0x11},
+ {828024, 0x00, 0x11},
+ {829520, 0x00, 0x12},
+ {833471, 0x00, 0x12},
+ {859972, 0x00, 0x13},
+ {1319537, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A72_errata_list = {
+ .cpu_pn = 0xD08,
+ .cpu_errata = {
+ {859971, 0x00, 0x03},
+ {1234567, 0x00, 0xFF},
+ {1319367, 0x00, 0xFF},
+ {9876543, 0x00, 0xFF},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A73_errata_list = {
+ .cpu_pn = 0xD09,
+ .cpu_errata = {
+ {852427, 0x00, 0x00},
+ {855423, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A75_errata_list = {
+ .cpu_pn = 0xD0A,
+ .cpu_errata = {
+ {764081, 0x00, 0x00},
+ {790748, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A76_errata_list = {
+ .cpu_pn = 0xD0B,
+ .cpu_errata = {
+ {1073348, 0x00, 0x10},
+ {1130799, 0x00, 0x20},
+ {1165522, 0x00, 0xFF},
+ {1220197, 0x00, 0x20},
+ {1257314, 0x00, 0x30},
+ {1262606, 0x00, 0x30},
+ {1262888, 0x00, 0x30},
+ {1275112, 0x00, 0x30},
+ {1286807, 0x00, 0x30},
+ {1791580, 0x00, 0x40},
+ {1868343, 0x00, 0x40},
+ {1946160, 0x30, 0x41},
+ {2743102, 0x00, 0x41},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A77_errata_list = {
+ .cpu_pn = 0xD0D,
+ .cpu_errata = {
+ {1508412, 0x00, 0x10},
+ {1791578, 0x00, 0x11},
+ {1800714, 0x00, 0x11},
+ {1925769, 0x00, 0x11},
+ {1946167, 0x00, 0x11},
+ {2356587, 0x00, 0x11},
+ {2743100, 0x00, 0x11},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78_AE_errata_list = {
+ .cpu_pn = 0xD42,
+ .cpu_errata = {
+ {1941500, 0x00, 0x01},
+ {1951502, 0x00, 0x01},
+ {2376748, 0x00, 0x01},
+ {2712574, 0x00, 0x02},
+ {2395408, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78_errata_list = {
+ .cpu_pn = 0xD41,
+ .cpu_errata = {
+ {1688305, 0x00, 0x10},
+ {1821534, 0x00, 0x10},
+ {1941498, 0x00, 0x11},
+ {1951500, 0x10, 0x11},
+ {1952683, 0x00, 0x00},
+ {2132060, 0x00, 0x12},
+ {2242635, 0x10, 0x12},
+ {2376745, 0x00, 0x12},
+ {2395406, 0x00, 0x12},
+ {2712571, 0x00, 0x12},
+ {2742426, 0x00, 0x12},
+ {2772019, 0x00, 0x12},
+ {2779479, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A78C_errata_list = {
+ .cpu_pn = 0xD4B,
+ .cpu_errata = {
+ {1827430, 0x00, 0x00},
+ {1827440, 0x00, 0x00},
+ {2132064, 0x01, 0x02},
+ {2242638, 0x01, 0x02},
+ {2376749, 0x01, 0x02},
+ {2395411, 0x01, 0x02},
+ {2683027, 0x01, 0x02},
+ {2712575, 0x01, 0x02},
+ {2743232, 0x01, 0x02},
+ {2772121, 0x00, 0x02},
+ {2779484, 0x01, 0x02},
+ {-1}
+ },
+};
+
+
+em_cpu_t cortex_X1_errata_list = {
+ .cpu_pn = 0xD44,
+ .cpu_errata = {
+ {1688305, 0x00, 0x10},
+ {1821534, 0x00, 0x10},
+ {1827429, 0x00, 0x10},
+ {-1}
+ },
+
+};
+
+em_cpu_t neoverse_N1_errata_list = {
+ .cpu_pn = 0xD0C,
+ .cpu_errata = {
+ {1043202, 0x00, 0x10},
+ {1073348, 0x00, 0x10},
+ {1130799, 0x00, 0x20},
+ {1165347, 0x00, 0x20},
+ {1207823, 0x00, 0x20},
+ {1220197, 0x00, 0x20},
+ {1257314, 0x00, 0x30},
+ {1262606, 0x00, 0x30},
+ {1262888, 0x00, 0x30},
+ {1275112, 0x00, 0x30},
+ {1315703, 0x00, 0x30},
+ {1542419, 0x30, 0x40},
+ {1868343, 0x00, 0x40},
+ {1946160, 0x30, 0x41},
+ {2743102, 0x00, 0x41},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_V1_errata_list = {
+ .cpu_pn = 0xD40,
+ .cpu_errata = {
+ {1618635, 0x00, 0x0F},
+ {1774420, 0x00, 0x10},
+ {1791573, 0x00, 0x10},
+ {1852267, 0x00, 0x10},
+ {1925756, 0x00, 0x11},
+ {1940577, 0x10, 0x11},
+ {1966096, 0x10, 0x11},
+ {2108267, 0x00, 0x11},
+ {2139242, 0x00, 0x11},
+ {2216392, 0x10, 0x11},
+ {2294912, 0x00, 0x11},
+ {2348377, 0x00, 0x11},
+ {2372203, 0x00, 0x11},
+ {2701953, 0x00, 0x11},
+ {2743093, 0x00, 0x12},
+ {2743233, 0x00, 0x12},
+ {2779461, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A710_errata_list = {
+ .cpu_pn = 0xD47,
+ .cpu_errata = {
+ {1987031, 0x00, 0x20},
+ {2008768, 0x00, 0x20},
+ {2017096, 0x00, 0x20},
+ {2055002, 0x10, 0x20},
+ {2058056, 0x00, 0x10},
+ {2081180, 0x00, 0x20},
+ {2083908, 0x20, 0x20},
+ {2136059, 0x00, 0x20},
+ {2147715, 0x20, 0x20},
+ {2216384, 0x00, 0x20},
+ {2267065, 0x00, 0x20},
+ {2282622, 0x00, 0x21},
+ {2291219, 0x00, 0x20},
+ {2371105, 0x00, 0x20},
+ {2701952, 0x00, 0x21},
+ {2742423, 0x00, 0x21},
+ {2768515, 0x00, 0x21},
+ {2778471, 0x00, 0x21},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_N2_errata_list = {
+ .cpu_pn = 0xD49,
+ .cpu_errata = {
+ {2002655, 0x00, 0x00},
+ {2025414, 0x00, 0x00},
+ {2067956, 0x00, 0x00},
+ {2138953, 0x00, 0x00},
+ {2138956, 0x00, 0x00},
+ {2138958, 0x00, 0x00},
+ {2189731, 0x00, 0x00},
+ {2242400, 0x00, 0x00},
+ {2242415, 0x00, 0x00},
+ {2280757, 0x00, 0x00},
+ {2326639, 0x00, 0x00},
+ {2376738, 0x00, 0x00},
+ {2388450, 0x00, 0x00},
+ {2728475, 0x00, 0x02},
+ {2743089, 0x00, 0x02},
+ {2779511, 0x00, 0x02},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X2_errata_list = {
+ .cpu_pn = 0xD48,
+ .cpu_errata = {
+ {2002765, 0x00, 0x20},
+ {2017096, 0x00, 0x20},
+ {2058056, 0x00, 0x20},
+ {2081180, 0x00, 0x20},
+ {2083908, 0x00, 0x20},
+ {2147715, 0x20, 0x20},
+ {2216384, 0x00, 0x20},
+ {2282622, 0x00, 0x21},
+ {2371105, 0x00, 0x21},
+ {2701952, 0x00, 0x21},
+ {2742423, 0x00, 0x21},
+ {2768515, 0x00, 0x21},
+ {2778471, 0x00, 0x21},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A510_errata_list = {
+ .cpu_pn = 0xD46,
+ .cpu_errata = {
+ {1922240, 0x00, 0x00},
+ {2041909, 0x02, 0x02},
+ {2042739, 0x00, 0x02},
+ {2080326, 0x02, 0x02},
+ {2172148, 0x00, 0x10},
+ {2218950, 0x00, 0x10},
+ {2250311, 0x00, 0x10},
+ {2288014, 0x00, 0x10},
+ {2347730, 0x00, 0x11},
+ {2371937, 0x00, 0x11},
+ {2666669, 0x00, 0x11},
+ {2684597, 0x00, 0x12},
+ {1234567, 0x00, 0x12},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X4_errata_list = {
+ .cpu_pn = 0xD82,
+ .cpu_errata = {
+ {2701112, 0x00, 0x00},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A715_errata_list = {
+ .cpu_pn = 0xD4D,
+ .cpu_errata = {
+ {2561034, 0x10, 0x10},
+ {-1}
+ },
+};
+
+em_cpu_t neoverse_V2_errata_list = {
+ .cpu_pn = 0xD4F,
+ .cpu_errata = {
+ {2331132, 0x00, 0x02},
+ {2618597, 0x00, 0x01},
+ {2662553, 0x00, 0x01},
+ {2719103, 0x00, 0x01},
+ {2719103, 0x00, 0x01},
+ {2719105, 0x00, 0x01},
+ {2743011, 0x00, 0x01},
+ {2779510, 0x00, 0x01},
+ {2801372, 0x00, 0x01},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_X3_errata_list = {
+ .cpu_pn = 0xD4E,
+ .cpu_errata = {
+ {2070301, 0x00, 0x12},
+ {2266875, 0x00, 0x10},
+ {2302506, 0x00, 0x11},
+ {2313909, 0x00, 0x10},
+ {2615812, 0x00, 0x11},
+ {2641945, 0x00, 0x10},
+ {2701951, 0x00, 0x11},
+ {2742421, 0x00, 0x11},
+ {2743088, 0x00, 0x11},
+ {2779509, 0x00, 0x11},
+ {-1}
+ },
+};
+
+em_cpu_t cortex_A520_errata_list = {
+ .cpu_pn = 0xD80,
+ .cpu_errata = {
+ {2630792, 0x00, 0x01},
+ {2858100, 0x00, 0x01},
+ {-1}
+ },
+};
+
+/*
+ * Test function checks the EM ABI version implemented:
+ * - Test fails if the version returned is < 1.0.
+ * - Test passes if the version returned is == 1.0.
+ */
+test_result_t test_em_version(void)
+{
+ int32_t version_return = tftf_em_abi_version();
+
+ if ((version_return == (EM_ABI_VERSION(1, 0))))
+ return TEST_RESULT_SUCCESS;
+
+ if (version_return == EM_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ return TEST_RESULT_FAIL;
+}
+
+/*
+ * Test function checks whether em_features is implemented.
+ * The test fails if the feature is not implemented
+ * or if the FID is invalid.
+ */
+
+test_result_t test_em_features(void)
+{
+ int32_t version_return = tftf_em_abi_version();
+
+ if (version_return == EM_NOT_SUPPORTED)
+ return TEST_RESULT_SKIPPED;
+
+ if (!(tftf_em_abi_feature_implemented(EM_CPU_ERRATUM_FEATURES)))
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+
+/*
+ * Test function checks whether em_cpu_feature is implemented.
+ * The test fails if em_cpu_feature is not implemented
+ * or if the FID is invalid.
+ */
+test_result_t test_em_cpu_features(void)
+{
+ test_result_t return_val = TEST_RESULT_FAIL;
+ smc_ret_values ret_val;
+
+ uint32_t midr_val = read_midr();
+ uint16_t rxpx_val_extracted = EXTRACT_REV_VAR(midr_val);
+ midr_val = EXTRACT_PARTNO(midr_val);
+
+ u_register_t mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int core_pos = platform_get_core_pos(mpid);
+
+ INFO("Partnum extracted = %x and rxpx extracted val = %x\n\n", midr_val, \
+ rxpx_val_extracted);
+ switch (midr_val) {
+ case 0xD09:
+ {
+ VERBOSE("MIDR matches A73 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A73_errata_list;
+ break;
+ }
+ case 0xD0B:
+ {
+ VERBOSE("MIDR matches A76 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A76_errata_list;
+ break;
+ }
+ case 0xD4D:
+ {
+ VERBOSE("MIDR matches A715 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A715_errata_list;
+ break;
+ }
+ case 0xD04:
+ {
+ VERBOSE("MIDR matches A35 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A35_errata_list;
+ break;
+ }
+ case 0xD03:
+ {
+ VERBOSE("MIDR matches A53 = %x\n", midr_val);
+ cpu_ptr = &cortex_A53_errata_list;
+ break;
+ }
+ case 0xD07:
+ {
+ VERBOSE("MIDR matches A57 = %x\n", midr_val);
+ cpu_ptr = &cortex_A57_errata_list;
+ break;
+ }
+ case 0xD08:
+ {
+ VERBOSE("MIDR matches A72 = %x\n", midr_val);
+ cpu_ptr = &cortex_A72_errata_list;
+ break;
+ }
+ case 0xD0D:
+ {
+ VERBOSE("MIDR matches A77 = %x\n", midr_val);
+ cpu_ptr = &cortex_A77_errata_list;
+ break;
+ }
+ case 0xD41:
+ {
+ VERBOSE("MIDR matches A78 = %x\n", midr_val);
+ cpu_ptr = &cortex_A78_errata_list;
+ break;
+ }
+ case 0xD0C:
+ {
+ VERBOSE("MIDR matches Neoverse N1 = %x\n", midr_val);
+ cpu_ptr = &neoverse_N1_errata_list;
+ break;
+ }
+ case 0xD4B:
+ {
+ VERBOSE("MIDR matches A78C = %x\n", midr_val);
+ cpu_ptr = &cortex_A78C_errata_list;
+ break;
+ }
+ case 0xD4F:
+ {
+ VERBOSE("MIDR matches Neoverse V2 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_V2_errata_list;
+ break;
+ }
+ case 0xD47:
+ {
+ VERBOSE("MIDR matches A710 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A710_errata_list;
+ break;
+ }
+ case 0xD46:
+ {
+ VERBOSE("MIDR matches A510 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A510_errata_list;
+ break;
+ }
+ case 0xD48:
+ {
+ VERBOSE("MIDR matches X2 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X2_errata_list;
+ break;
+ }
+ case 0xD49:
+ {
+ VERBOSE("MIDR matches Neoverse N2 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_N2_errata_list;
+ break;
+ }
+ case 0xD40:
+ {
+ VERBOSE("MIDR matches Neoverse V1 -> %x\n", midr_val);
+ cpu_ptr = &neoverse_V1_errata_list;
+ break;
+ }
+ case 0xD44:
+ {
+ VERBOSE("MIDR matches X1 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X1_errata_list;
+ break;
+ }
+ case 0xD0A:
+ {
+ VERBOSE("MIDR matches A75 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A75_errata_list;
+ break;
+ }
+ case 0xD05:
+ {
+ VERBOSE("MIDR matches A55 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A55_errata_list;
+ break;
+ }
+ case 0xD42:
+ {
+ VERBOSE("MIDR matches A78_AE -> %x\n", midr_val);
+ cpu_ptr = &cortex_A78_AE_errata_list;
+ break;
+ }
+ case 0xD82:
+ {
+ VERBOSE("MIDR matches Cortex-X4 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X4_errata_list;
+ break;
+ }
+ case 0xD4E:
+ {
+ VERBOSE("MIDR matches Cortex-X3 -> %x\n", midr_val);
+ cpu_ptr = &cortex_X3_errata_list;
+ break;
+ }
+ case 0xD80:
+ {
+ VERBOSE("MIDR matches A520 -> %x\n", midr_val);
+ cpu_ptr = &cortex_A520_errata_list;
+ break;
+ }
+ default:
+ {
+ ERROR("MIDR did not match any cpu\n");
+ return TEST_RESULT_SKIPPED;
+ break;
+ }
+ }
+
+ for (int i = 0; i < ERRATA_COUNT && cpu_ptr->cpu_errata[i].em_errata_id != -1; i++) {
+
+ ret_val = tftf_em_abi_cpu_feature_implemented \
+ (cpu_ptr->cpu_errata[i].em_errata_id, \
+ FORWARD_FLAG_EL1);
+
+ switch (ret_val.ret0) {
+
+ case EM_NOT_AFFECTED:
+ {
+ return_val = (RXPX_RANGE(rxpx_val_extracted, \
+ cpu_ptr->cpu_errata[i].rxpx_low, cpu_ptr->cpu_errata[i].rxpx_high) \
+ == false) ? TEST_RESULT_SUCCESS : TEST_RESULT_FAIL;
+ break;
+ }
+ case EM_AFFECTED:
+ {
+ return_val = TEST_RESULT_SUCCESS;
+ break;
+ }
+ case EM_HIGHER_EL_MITIGATION:
+ {
+ return_val = (RXPX_RANGE(rxpx_val_extracted, \
+ cpu_ptr->cpu_errata[i].rxpx_low, cpu_ptr->cpu_errata[i].rxpx_high) \
+ == true) ? TEST_RESULT_SUCCESS : TEST_RESULT_FAIL;
+ break;
+ }
+ case EM_UNKNOWN_ERRATUM:
+ {
+ return_val = TEST_RESULT_SUCCESS;
+ break;
+ }
+ default:
+ {
+ ERROR("Return value did not match the expected returns\n");
+ return_val = TEST_RESULT_FAIL;
+ break;
+ }
+ }
+ INFO("errata_id = %d and test_em_cpu_erratum_features = %ld\n",\
+ cpu_ptr->cpu_errata[i].em_errata_id, ret_val.ret0);
+ }
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_has_entered_test[core_pos]);
+ return return_val;
+}
+
+test_result_t test_errata_abi_features(void)
+{
+ unsigned int lead_mpid;
+ unsigned int cpu_mpid, cpu_node, core_pos;
+ int psci_ret;
+
+ int32_t version_return = tftf_em_abi_version();
+
+ SKIP_TEST_IF_LESS_THAN_N_CPUS(1);
+
+ if (version_return == EM_NOT_SUPPORTED) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ if (!(tftf_em_abi_feature_implemented(EM_CPU_ERRATUM_FEATURES))) {
+ return TEST_RESULT_FAIL;
+ }
+
+ lead_mpid = read_mpidr_el1() & MPID_MASK;
+ /* Power on all CPUs */
+ for_each_cpu(cpu_node) {
+ cpu_mpid = tftf_get_mpidr_from_node(cpu_node);
+
+ /* Skip lead CPU as it is already powered on */
+ if (cpu_mpid == lead_mpid)
+ continue;
+
+ psci_ret = tftf_cpu_on(cpu_mpid, (uintptr_t)test_em_cpu_features, 0);
+ if (psci_ret != PSCI_E_SUCCESS) {
+ tftf_testcase_printf("Failed to power on CPU 0x%x (%d)\n", \
+ cpu_mpid, psci_ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ core_pos = platform_get_core_pos(cpu_mpid);
+ tftf_wait_for_event(&cpu_has_entered_test[core_pos]);
+ }
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
index 9e9998c9c..7da63caaa 100644
--- a/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/cpu_suspend/test_suspend.c
@@ -20,13 +20,15 @@
#include <timer.h>
/*
- * Desired affinity level and state type (standby or powerdown) for the next
- * CPU_SUSPEND operation. We need these shared variables because there is no way
- * to pass arguments to non-lead CPUs...
+ * Desired affinity level, state type (standby or powerdown), and entry time for
+ * each CPU in the next CPU_SUSPEND operation. We need these shared variables
+ * because there is no way to pass arguments to non-lead CPUs...
*/
-static unsigned int test_aff_level;
-static unsigned int test_suspend_type;
+static unsigned int test_aff_level[PLATFORM_CORE_COUNT];
+static unsigned int test_suspend_type[PLATFORM_CORE_COUNT];
+static unsigned int test_suspend_entry_time[PLATFORM_CORE_COUNT];
+static event_t cpu_booted[PLATFORM_CORE_COUNT];
static event_t cpu_ready[PLATFORM_CORE_COUNT];
/*
@@ -53,6 +55,38 @@ static int requested_irq_handler(void *data)
return 0;
}
+static test_result_t test_init(unsigned int aff_level,
+ unsigned int suspend_type)
+{
+ if (aff_level > MPIDR_MAX_AFFLVL)
+ return TEST_RESULT_SKIPPED;
+
+ assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
+ (suspend_type == PSTATE_TYPE_STANDBY));
+
+ for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
+ /* Export these variables for the non-lead CPUs */
+ test_aff_level[i] = aff_level;
+ test_suspend_type[i] = suspend_type;
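+		/*
+		 * Scale the wake-up time by the core count: the lead CPU
+		 * releases the non-lead CPUs one at a time with a
+		 * PLAT_SUSPEND_ENTRY_TIME delay between each, so every CPU
+		 * must program a timer long enough to cover the whole
+		 * sequence.
+		 */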
+ test_suspend_entry_time[i] =
+ PLAT_SUSPEND_ENTRY_TIME * PLATFORM_CORE_COUNT;
+
+ /*
+		 * All testcases in this file use the same arrays, so they need
+		 * to be re-initialised each time.
+ */
+ tftf_init_event(&cpu_booted[i]);
+ tftf_init_event(&cpu_ready[i]);
+ tftf_init_event(&event_received_wake_irq[i]);
+ requested_irq_received[i] = 0;
+ }
+
+ /* Ensure the above writes are seen before any read */
+ dmbsy();
+
+ return TEST_RESULT_SUCCESS;
+}
+
/*
* Suspend the calling (non-lead) CPU.
* 1) Program a wake-up event to come out of suspend state
@@ -64,21 +98,26 @@ static test_result_t suspend_non_lead_cpu(void)
{
unsigned int mpid = read_mpidr_el1();
unsigned int core_pos = platform_get_core_pos(mpid);
+ unsigned int aff_level = test_aff_level[core_pos];
+ unsigned int suspend_type = test_suspend_type[core_pos];
uint32_t power_state, stateid;
int rc, expected_return_val;
u_register_t flags;
tftf_timer_register_handler(requested_irq_handler);
- /* Tell the lead CPU that the calling CPU is about to suspend itself */
- tftf_send_event(&cpu_ready[core_pos]);
+ /* Signal to the lead CPU that the calling CPU has entered the test */
+ tftf_send_event(&cpu_booted[core_pos]);
+
+ /* Wait for signal from the lead CPU before suspending itself */
+ tftf_wait_for_event(&cpu_ready[core_pos]);
/* IRQs need to be disabled prior to programming the timer */
/* Preserve DAIF flags*/
flags = read_daif();
disable_irq();
- rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ rc = tftf_program_timer(test_suspend_entry_time[core_pos]);
if (rc != 0) {
/* Restore previous DAIF flags */
write_daif(flags);
@@ -87,15 +126,14 @@ static test_result_t suspend_non_lead_cpu(void)
return TEST_RESULT_FAIL;
}
- expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
- test_suspend_type, &stateid);
+ expected_return_val = tftf_psci_make_composite_state_id(aff_level,
+ suspend_type,
+ &stateid);
/*
* Suspend the calling CPU to the desired affinity level and power state
*/
- power_state = tftf_make_psci_pstate(test_aff_level,
- test_suspend_type,
- stateid);
+ power_state = tftf_make_psci_pstate(aff_level, suspend_type, stateid);
rc = tftf_cpu_suspend(power_state);
/* Restore previous DAIF flags */
@@ -126,38 +164,17 @@ static test_result_t suspend_non_lead_cpu(void)
*
* The test is skipped if an error occurs during the bring-up of non-lead CPUs.
*/
-static test_result_t test_psci_suspend(unsigned int aff_level,
- unsigned int suspend_type)
+static test_result_t test_psci_suspend(void)
{
unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
unsigned int target_mpid, target_node;
unsigned int core_pos;
+ unsigned int aff_level, suspend_type;
uint32_t power_state, stateid;
int rc, expected_return_val;
+ int aff_info;
u_register_t flags;
- if (aff_level > MPIDR_MAX_AFFLVL)
- return TEST_RESULT_SKIPPED;
-
- assert((suspend_type == PSTATE_TYPE_POWERDOWN) ||
- (suspend_type == PSTATE_TYPE_STANDBY));
-
- /* Export these variables for the non-lead CPUs */
- test_aff_level = aff_level;
- test_suspend_type = suspend_type;
-
- /*
- * All testcases in this file use the same cpu_ready[] array so it needs
- * to be re-initialised each time.
- */
- for (unsigned int i = 0; i < PLATFORM_CORE_COUNT; ++i) {
- tftf_init_event(&cpu_ready[i]);
- tftf_init_event(&event_received_wake_irq[i]);
- requested_irq_received[i] = 0;
- }
- /* Ensure the above writes are seen before any read */
- dmbsy();
-
/*
* Preparation step: Power on all cores.
*/
@@ -168,8 +185,8 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
continue;
rc = tftf_cpu_on(target_mpid,
- (uintptr_t) suspend_non_lead_cpu,
- 0);
+ (uintptr_t) suspend_non_lead_cpu,
+ 0);
if (rc != PSCI_E_SUCCESS) {
tftf_testcase_printf(
"Failed to power on CPU 0x%x (%d)\n",
@@ -178,7 +195,7 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
}
}
- /* Wait for all non-lead CPUs to be ready */
+ /* Wait for all non-lead CPUs to enter the test */
for_each_cpu(target_node) {
target_mpid = tftf_get_mpidr_from_node(target_node);
/* Skip lead CPU */
@@ -186,7 +203,19 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
continue;
core_pos = platform_get_core_pos(target_mpid);
- tftf_wait_for_event(&cpu_ready[core_pos]);
+ tftf_wait_for_event(&cpu_booted[core_pos]);
+ }
+
+ /* Signal to each non-lead CPU to suspend itself */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ tftf_send_event(&cpu_ready[core_pos]);
+ waitms(PLAT_SUSPEND_ENTRY_TIME);
}
/* IRQs need to be disabled prior to programming the timer */
@@ -198,7 +227,7 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
* Program the timer, this will serve as the
* wake-up event to come out of suspend state.
*/
- rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME);
+ rc = tftf_program_timer(PLAT_SUSPEND_ENTRY_TIME * PLATFORM_CORE_COUNT);
if (rc) {
/* Restore previous DAIF flags */
write_daif(flags);
@@ -207,16 +236,18 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
return TEST_RESULT_FAIL;
}
- expected_return_val = tftf_psci_make_composite_state_id(test_aff_level,
- test_suspend_type, &stateid);
+ core_pos = platform_get_core_pos(lead_mpid);
+ aff_level = test_aff_level[core_pos];
+ suspend_type = test_suspend_type[core_pos];
+ expected_return_val = tftf_psci_make_composite_state_id(aff_level,
+ suspend_type,
+ &stateid);
/*
* Suspend the calling CPU to the desired affinity level and power state
*/
- power_state = tftf_make_psci_pstate(test_aff_level,
- test_suspend_type,
- stateid);
- if (test_aff_level >= PSTATE_AFF_LVL_2)
+ power_state = tftf_make_psci_pstate(aff_level, suspend_type, stateid);
+ if (aff_level >= PSTATE_AFF_LVL_2)
rc = tftf_cpu_suspend_save_sys_ctx(power_state);
else
rc = tftf_cpu_suspend(power_state);
@@ -246,6 +277,19 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
tftf_wait_for_event(&event_received_wake_irq[core_pos]);
}
+ /* Wait for all non-lead CPUs to power down */
+ for_each_cpu(target_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ do {
+ aff_info = tftf_psci_affinity_info(target_mpid,
+ MPIDR_AFFLVL0);
+ } while (aff_info != PSCI_STATE_OFF);
+ }
+
if (rc == expected_return_val)
return TEST_RESULT_SUCCESS;
@@ -255,11 +299,27 @@ static test_result_t test_psci_suspend(unsigned int aff_level,
}
/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at the specified
+ * affinity level
+ */
+static test_result_t test_psci_suspend_level(unsigned int aff_level,
+ unsigned int suspend_type)
+{
+ int rc;
+
+ rc = test_init(aff_level, suspend_type);
+ if (rc != TEST_RESULT_SUCCESS)
+ return rc;
+
+ return test_psci_suspend();
+}
+
+/*
* @Test_Aim@ Suspend to powerdown state targeted at affinity level 0
*/
test_result_t test_psci_suspend_powerdown_level0(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_0, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -267,7 +327,7 @@ test_result_t test_psci_suspend_powerdown_level0(void)
*/
test_result_t test_psci_suspend_standby_level0(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_0, PSTATE_TYPE_STANDBY);
}
/*
@@ -275,7 +335,7 @@ test_result_t test_psci_suspend_standby_level0(void)
*/
test_result_t test_psci_suspend_powerdown_level1(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_1, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -283,7 +343,7 @@ test_result_t test_psci_suspend_powerdown_level1(void)
*/
test_result_t test_psci_suspend_standby_level1(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_1, PSTATE_TYPE_STANDBY);
}
/*
@@ -291,7 +351,7 @@ test_result_t test_psci_suspend_standby_level1(void)
*/
test_result_t test_psci_suspend_powerdown_level2(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_2, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -299,7 +359,7 @@ test_result_t test_psci_suspend_powerdown_level2(void)
*/
test_result_t test_psci_suspend_standby_level2(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_2, PSTATE_TYPE_STANDBY);
}
/*
@@ -307,7 +367,7 @@ test_result_t test_psci_suspend_standby_level2(void)
*/
test_result_t test_psci_suspend_powerdown_level3(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_3, PSTATE_TYPE_POWERDOWN);
}
/*
@@ -315,5 +375,317 @@ test_result_t test_psci_suspend_powerdown_level3(void)
*/
test_result_t test_psci_suspend_standby_level3(void)
{
- return test_psci_suspend(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+ return test_psci_suspend_level(PSTATE_AFF_LVL_3, PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 0
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level0_osi(unsigned int suspend_type)
+{
+ int err, rc;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ rc = test_psci_suspend_level(PSTATE_AFF_LVL_0, suspend_type);
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 0 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level0_osi(void)
+{
+ return test_psci_suspend_level0_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 0 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level0_osi(void)
+{
+ return test_psci_suspend_level0_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 1
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level1_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int target_mpid, target_node, lvl_1_node, lvl_1_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t pd_node;
+ int err, rc;
+
+ err = test_init(PSTATE_AFF_LVL_1, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
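+	/*
+	 * In OS-initiated mode only the last CPU to suspend in a power domain
+	 * may request that domain's state. Have the last CPU of each level 1
+	 * domain other than the lead CPU's target level 1; all other CPUs
+	 * target level 0.
+	 */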
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ pd_node = tftf_pd_nodes[lvl_1_node];
+ lvl_1_end_node = pd_node.cpu_start_node + pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid = tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] = PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] = PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 1 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level1_osi(void)
+{
+ return test_psci_suspend_level1_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 1 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level1_osi(void)
+{
+ return test_psci_suspend_level1_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 2
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level2_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int lead_lvl_2_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_2);
+ unsigned int target_mpid, target_node;
+ unsigned int lvl_1_node, lvl_2_node;
+ unsigned int lvl_1_end_node, lvl_2_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t lvl_1_pd_node, lvl_2_pd_node;
+ int err, rc;
+
+ err = test_init(PSTATE_AFF_LVL_2, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
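+	/*
+	 * As for level 1, but each CPU targets the deepest level for which it
+	 * is the last CPU in a non-lead power domain: level 2, level 1 or
+	 * level 0.
+	 */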
+ for_each_power_domain_idx(lvl_2_node, PSTATE_AFF_LVL_2) {
+ lvl_2_pd_node = tftf_pd_nodes[lvl_2_node];
+ lvl_2_end_node =
+ lvl_2_pd_node.cpu_start_node + lvl_2_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ lvl_1_pd_node = tftf_pd_nodes[lvl_1_node];
+ if (lvl_1_pd_node.parent_node != lvl_2_node)
+ continue;
+
+ lvl_1_end_node =
+ lvl_1_pd_node.cpu_start_node +
+ lvl_1_pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid =
+ tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ lvl_2_node != lead_lvl_2_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_2;
+ } else if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 2 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level2_osi(void)
+{
+ return test_psci_suspend_level2_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 2 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level2_osi(void)
+{
+ return test_psci_suspend_level2_osi(PSTATE_TYPE_STANDBY);
+}
+
+/*
+ * @Test_Aim@ Suspend to the specified suspend type targeted at affinity level 3
+ * in OS-initiated mode
+ */
+static test_result_t test_psci_suspend_level3_osi(unsigned int suspend_type)
+{
+ unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
+ unsigned int lead_lvl_1_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_1);
+ unsigned int lead_lvl_2_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_2);
+ unsigned int lead_lvl_3_node =
+ tftf_get_parent_node_from_mpidr(lead_mpid, PSTATE_AFF_LVL_3);
+ unsigned int target_mpid, target_node;
+ unsigned int lvl_1_node, lvl_2_node, lvl_3_node;
+ unsigned int lvl_1_end_node, lvl_2_end_node, lvl_3_end_node;
+ unsigned int core_pos;
+ tftf_pwr_domain_node_t lvl_1_pd_node, lvl_2_pd_node, lvl_3_pd_node;
+ int err, rc;
+
+	err = test_init(PSTATE_AFF_LVL_3, suspend_type);
+ if (err != TEST_RESULT_SUCCESS)
+ return err;
+
+ err = tftf_psci_set_suspend_mode(PSCI_OS_INIT);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
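+	/*
+	 * Same scheme extended to level 3: each CPU targets the deepest
+	 * non-lead power domain in which it is the last CPU to suspend.
+	 */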
+ for_each_power_domain_idx(lvl_3_node, PSTATE_AFF_LVL_3) {
+ lvl_3_pd_node = tftf_pd_nodes[lvl_3_node];
+ lvl_3_end_node =
+ lvl_3_pd_node.cpu_start_node + lvl_3_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_2_node, PSTATE_AFF_LVL_2) {
+ lvl_2_pd_node = tftf_pd_nodes[lvl_2_node];
+ if (lvl_2_pd_node.parent_node != lvl_3_node)
+ continue;
+
+ lvl_2_end_node =
+ lvl_2_pd_node.cpu_start_node + lvl_2_pd_node.ncpus - 1;
+
+ for_each_power_domain_idx(lvl_1_node, PSTATE_AFF_LVL_1) {
+ lvl_1_pd_node = tftf_pd_nodes[lvl_1_node];
+ if (lvl_1_pd_node.parent_node != lvl_2_node)
+ continue;
+
+ lvl_1_end_node =
+ lvl_1_pd_node.cpu_start_node +
+ lvl_1_pd_node.ncpus - 1;
+
+ for_each_cpu_in_power_domain(target_node, lvl_1_node) {
+ target_mpid =
+ tftf_get_mpidr_from_node(target_node);
+ /* Skip lead CPU as it is already on */
+ if (target_mpid == lead_mpid)
+ continue;
+
+ core_pos = platform_get_core_pos(target_mpid);
+ if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ target_node == lvl_3_end_node &&
+ lvl_3_node != lead_lvl_3_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_3;
+					} else if (target_node == lvl_1_end_node &&
+ target_node == lvl_2_end_node &&
+ lvl_2_node != lead_lvl_2_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_2;
+ } else if (target_node == lvl_1_end_node &&
+ lvl_1_node != lead_lvl_1_node) {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_1;
+ } else {
+ test_aff_level[core_pos] =
+ PSTATE_AFF_LVL_0;
+ }
+ }
+ }
+
+ }
+ }
+
+ rc = test_psci_suspend();
+
+ err = tftf_psci_set_suspend_mode(PSCI_PLAT_COORD);
+ if (err != PSCI_E_SUCCESS)
+ return TEST_RESULT_FAIL;
+
+ return rc;
+}
+
+/*
+ * @Test_Aim@ Suspend to powerdown state targeted at affinity level 3 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_powerdown_level3_osi(void)
+{
+ return test_psci_suspend_level3_osi(PSTATE_TYPE_POWERDOWN);
+}
+
+/*
+ * @Test_Aim@ Suspend to standby state targeted at affinity level 3 in
+ * OS-initiated mode
+ */
+test_result_t test_psci_suspend_standby_level3_osi(void)
+{
+ return test_psci_suspend_level3_osi(PSTATE_TYPE_STANDBY);
}
diff --git a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
index 4732796c1..2a1e9e70c 100644
--- a/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
+++ b/tftf/tests/runtime_services/standard_service/psci/api_tests/psci_stat/test_psci_stat.c
@@ -30,12 +30,16 @@ typedef struct psci_stat_data {
u_register_t residency;
} psci_stat_data_t;
-/* Assuming 3 power levels as maximum */
+/* Assuming 4 power levels as maximum */
#define MAX_STAT_STATES (PLAT_MAX_PWR_STATES_PER_LVL * \
PLAT_MAX_PWR_STATES_PER_LVL * \
+ PLAT_MAX_PWR_STATES_PER_LVL * \
PLAT_MAX_PWR_STATES_PER_LVL)
-CASSERT(PLAT_MAX_PWR_LEVEL <= 2, assert_maximum_defined_stat_array_size_exceeded);
+/* Based on PSCI_MAX_PWR_LVL in TF-A.
+ * See: https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/tree/include/lib/psci/psci.h#n38
+ */
+CASSERT(PLAT_MAX_PWR_LEVEL <= 3, assert_maximum_defined_stat_array_size_exceeded);
/*
* The data structure holding stat information as queried by each CPU.
diff --git a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
index 99f685406..58ece719d 100644
--- a/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
+++ b/tftf/tests/runtime_services/standard_service/psci/system_tests/test_psci_on_off_suspend_stress.c
@@ -47,27 +47,6 @@ static unsigned int include_cpu_suspend;
static test_result_t secondary_cpu_on_race_test(void);
/*
- * Utility function to wait for all CPUs other than the caller to be
- * OFF.
- */
-static void wait_for_non_lead_cpus(void)
-{
- unsigned int lead_mpid = read_mpidr_el1() & MPID_MASK;
- unsigned int target_mpid, target_node;
-
- for_each_cpu(target_node) {
- target_mpid = tftf_get_mpidr_from_node(target_node);
- /* Skip lead CPU, as it is powered on */
- if (target_mpid == lead_mpid)
- continue;
-
- while (tftf_psci_affinity_info(target_mpid, MPIDR_AFFLVL0)
- != PSCI_STATE_OFF)
- ;
- }
-}
-
-/*
* Update per-cpu counter corresponding to the current CPU.
* This function updates 2 counters, one in normal memory and the other
* in coherent device memory. The counts are then compared to check if they
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
index 74fe4a60c..a88723465 100644
--- a/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/sdei_entrypoint.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -13,6 +13,7 @@
.globl sdei_entrypoint_resume
.globl sdei_handler_done
.globl sdei_rm_any_entrypoint
+ .globl sdei_check_pstate_entrypoint
.local event_handled
.comm event_handled, PLATFORM_CORE_COUNT * 4, 8
@@ -126,6 +127,33 @@ func sdei_rm_any_entrypoint
b .
endfunc sdei_rm_any_entrypoint
+func sdei_check_pstate_entrypoint
+ stp x2, x30, [sp, #-16]!
+
+ /* Dispatch to C handler */
+ bl sdei_check_pstate_handler
+
+ /* Calculate address of event completion variable */
+ mrs x0, mpidr_el1
+ mov_imm x1, MPID_MASK
+ and x0, x0, x1
+ bl platform_get_core_pos
+ lsl x0, x0, #2
+ adrp x1, event_handled
+ add x1, x1, :lo12:event_handled
+ add x1, x0, x1
+
+ /* Mark event handling as complete so `sdei_handler_done` can return */
+ mov w2, #1
+ str w2, [x1]
+ sev
+
+ /* Populate `x0` and `x1` to prepare for SMC call */
+ ldp x1, x30, [sp], #16
+ mov_imm x0, SDEI_EVENT_COMPLETE_AND_RESUME
+ smc #0
+endfunc sdei_check_pstate_entrypoint
+
#else /* AARCH32 */
func sdei_entrypoint
/* SDEI is not supported on AArch32. */
diff --git a/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c
new file mode 100644
index 000000000..339e4ba5b
--- /dev/null
+++ b/tftf/tests/runtime_services/standard_service/sdei/system_tests/test_sdei_pstate.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2021, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_features.h>
+#include <arch_helpers.h>
+#include <debug.h>
+#include <events.h>
+#include <plat_topology.h>
+#include <platform.h>
+#include <power_management.h>
+#include <sdei.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+#include <timer.h>
+
+#ifdef __aarch64__
+
+#define EV_COOKIE 0xDEADBEEF
+
+extern sdei_handler_t sdei_check_pstate_entrypoint;
+
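+/* PSTATE values captured by the SDEI handler for the test body to examine */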
+u_register_t daif;
+u_register_t sp;
+u_register_t pan;
+u_register_t dit;
+
+int sdei_check_pstate_handler(int ev, unsigned long long arg)
+{
+ printf("%s: handler fired\n", __func__);
+ daif = read_daif();
+ sp = read_spsel();
+ if (is_armv8_1_pan_present())
+ pan = read_pan();
+
+ if (is_armv8_4_dit_present())
+ dit = read_dit();
+
+ assert(arg == EV_COOKIE);
+ return 0;
+}
+
+static test_result_t sdei_event_check_pstate(void)
+{
+ long long ret;
+
+ ret = sdei_event_register(0, sdei_check_pstate_entrypoint, EV_COOKIE,
+ SDEI_REGF_RM_PE, read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event register failed: 0x%llx\n",
+ ret);
+ return TEST_RESULT_FAIL;
+ }
+
+ ret = sdei_event_enable(0);
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event enable failed: 0x%llx\n", ret);
+ goto err0;
+ }
+
+ ret = sdei_pe_unmask();
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI pe unmask failed: 0x%llx\n", ret);
+ goto err1;
+ }
+
+ /* Check the common bits are set correctly */
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: 0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+
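+	/* PSTATE.DAIF with the D, A, I and F bits all set */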
+ u_register_t all_interrupts_masked = 0x3c0;
+
+ if (daif != all_interrupts_masked) {
+ tftf_testcase_printf("Interrupts were not correctly masked " \
+ "during SDEI event signal\n" \
+ "Expected DAIF: 0x%lx, " \
+ "Actual DAIF: 0x%lx\n",
+ all_interrupts_masked, daif);
+ ret = -1;
+ goto err1;
+ }
+
+ u_register_t use_sp_elx = 0x1;
+
+ if (sp != use_sp_elx) {
+ tftf_testcase_printf("The SPSel PSTATE Bit was not set " \
+ "correctly during SDEI event signal\n" \
+ "Expected SPSel: 0x%lx, " \
+ "Actual SPSel: 0x%lx\n",
+ use_sp_elx, sp);
+ ret = -1;
+ goto err1;
+ }
+
+ if (is_armv8_1_pan_present()) {
+ printf("PAN Enabled so testing PAN PSTATE bit\n");
+
+		/* Test PSTATE.PAN behaviour with SCTLR_EL2.SPAN cleared.
+		 * Clear the SPAN bit first.
+		 */
+ u_register_t old_sctlr = read_sctlr_el2();
+
+ write_sctlr_el2(old_sctlr & ~SCTLR_SPAN_BIT);
+
+ u_register_t old_hcr_el2 = read_hcr_el2();
+
+ /*
+ * Check that when the SPAN bit is 0
+ * the PAN PSTATE bit is maintained
+ */
+
+ if ((old_hcr_el2 & HCR_TGE_BIT) == 0U) {
+ /*
+ * Check that when the HCR_EL2.TGE != 1
+ * the PAN bit is maintained
+ */
+
+ /* When PAN bit is 0 */
+ u_register_t expected_pan = 0;
+ write_pan(expected_pan);
+
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: " \
+ "0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+ if (pan != expected_pan) {
+				tftf_testcase_printf("PAN PSTATE bit not maintained " \
+						"during SDEI event signal " \
+						"when the SPAN bit is unset and " \
+						"HCR_EL2.TGE != 1\n" \
+ "Expected PAN: 0x%lx, " \
+ "Actual PAN: 0x%lx\n",
+ expected_pan, pan);
+ ret = -1;
+ goto err1;
+ }
+
+ /* When PAN Bit is 1 */
+ expected_pan = PAN_BIT;
+ write_pan(expected_pan);
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: " \
+ "0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+ if (pan != expected_pan) {
+				tftf_testcase_printf("PAN PSTATE bit not maintained " \
+						"during SDEI event signal " \
+						"when the SPAN bit is unset and " \
+						"HCR_EL2.TGE != 1\n" \
+ "Expected PAN: 0x%lx, " \
+ "Actual PAN: 0x%lx\n",
+ expected_pan, pan);
+ ret = -1;
+ goto err1;
+ }
+
+ }
+
+ /*
+ * Check that when the HCR_EL2.TGE = 1 and SPAN bit is unset,
+ * PAN bit is forced to 1.
+ * Set the TGE bit
+ */
+
+ write_hcr_el2(old_hcr_el2 | HCR_TGE_BIT);
+
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: " \
+ "0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+ if (pan != PAN_BIT) {
+ tftf_testcase_printf("PAN PSTATE bit was not forced " \
+ "to 1 during SDEI event signal " \
+ "when the SPAN bit is unset and " \
+					"HCR_EL2.TGE = 1\n");
+ ret = -1;
+ goto err1;
+ }
+
+ /*
+ * Set the SCTLR and HCR_EL2 registers back to their old values
+ */
+ write_sctlr_el2(old_sctlr);
+ write_hcr_el2(old_hcr_el2);
+ }
+
+ /* Check that the DIT PSTATE bit is maintained during event signal */
+ if (is_armv8_4_dit_present()) {
+ printf("DIT Enabled so testing DIT PSTATE bit\n");
+ /* When DIT bit is 0 */
+ u_register_t expected_dit = 0;
+
+ write_dit(expected_dit);
+ ret = sdei_event_signal(read_mpidr_el1());
+
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: " \
+ "0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+ if (dit != expected_dit) {
+ tftf_testcase_printf("DIT PSTATE bit not maintained " \
+ "during SDEI event signal\n" \
+ "Expected DIT: 0x%lx, " \
+ "Actual DIT: 0x%lx\n",
+ expected_dit, dit);
+ ret = -1;
+ goto err1;
+ }
+
+ /* When dit bit is 1 */
+ expected_dit = DIT_BIT;
+ write_dit(expected_dit);
+ ret = sdei_event_signal(read_mpidr_el1());
+ if (ret < 0) {
+ tftf_testcase_printf("SDEI event signal failed: " \
+ "0x%llx\n", ret);
+ goto err2;
+ }
+ sdei_handler_done();
+ if (dit != expected_dit) {
+ tftf_testcase_printf("DIT PSTATE bit not maintained " \
+ "during SDEI event signal\n" \
+ "Expected DIT: 0x%lx, " \
+ "Actual DIT: 0x%lx\n",
+ expected_dit, dit);
+ ret = -1;
+ goto err1;
+ }
+ }
+
+err2:
+ sdei_pe_mask();
+err1:
+ sdei_event_disable(0);
+err0:
+ sdei_event_unregister(0);
+
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+
+ return TEST_RESULT_SUCCESS;
+}
+#endif /* __aarch64__ */
+
+/* The CPU signals itself using SDEI event signalling and checks the handler's PSTATE. */
+test_result_t test_sdei_event_check_pstate(void)
+{
+ SKIP_TEST_IF_AARCH32();
+#ifdef __aarch64__
+ long long ret;
+
+ ret = sdei_version();
+ if (ret != MAKE_SDEI_VERSION(1, 0, 0)) {
+ tftf_testcase_printf("Unexpected SDEI version: 0x%llx\n", ret);
+ return TEST_RESULT_SKIPPED;
+ }
+
+ disable_irq();
+ /* We only need to run these tests on the main CPU */
+ if (sdei_event_check_pstate() != TEST_RESULT_SUCCESS) {
+ ret = -1;
+ goto err0;
+ }
+
+err0:
+ enable_irq();
+ if (ret < 0)
+ return TEST_RESULT_FAIL;
+ return TEST_RESULT_SUCCESS;
+#endif /* __aarch64__ */
+}
diff --git a/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c b/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
index 64b8db78f..72a4ec5ab 100644
--- a/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
+++ b/tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -29,7 +29,6 @@ test_result_t test_trng_version(void)
return TEST_RESULT_SKIPPED;
}
-
if (version < TRNG_VERSION(1, 0)) {
return TEST_RESULT_FAIL;
}
@@ -51,8 +50,7 @@ test_result_t test_trng_features(void)
return TEST_RESULT_SKIPPED;
}
- if (!(tftf_trng_feature_implemented(SMC_TRNG_VERSION) &&
- tftf_trng_feature_implemented(SMC_TRNG_FEATURES) &&
+ if (!(tftf_trng_feature_implemented(SMC_TRNG_FEATURES) &&
tftf_trng_feature_implemented(SMC_TRNG_UUID) &&
tftf_trng_feature_implemented(SMC_TRNG_RND))) {
return TEST_RESULT_FAIL;
@@ -75,6 +73,11 @@ test_result_t test_trng_rnd(void)
return TEST_RESULT_SKIPPED;
}
+ /* Ensure function is implemented before requesting Entropy */
+ if (!(tftf_trng_feature_implemented(SMC_TRNG_RND))) {
+ return TEST_RESULT_FAIL;
+ }
+
/* Test invalid entropy sizes */
rnd_out = tftf_trng_rnd(U(0));
if (rnd_out.ret0 != TRNG_E_INVALID_PARAMS) {
@@ -97,7 +100,7 @@ test_result_t test_trng_rnd(void)
/* For N = 1, all returned entropy bits should be 0
* except the least significant bit */
rnd_out = tftf_trng_rnd(U(1));
- if (rnd_out.ret0 == TRNG_E_NO_ENTOPY) {
+ if (rnd_out.ret0 == TRNG_E_NO_ENTROPY) {
WARN("There is not a single bit of entropy\n");
return TEST_RESULT_SKIPPED;
}
@@ -116,7 +119,7 @@ test_result_t test_trng_rnd(void)
/* For N = MAX_BITS-1, the most significant bit should be 0 */
rnd_out = tftf_trng_rnd(TRNG_MAX_BITS - U(1));
- if (rnd_out.ret0 == TRNG_E_NO_ENTOPY) {
+ if (rnd_out.ret0 == TRNG_E_NO_ENTROPY) {
WARN("There is not a single bit of entropy\n");
return TEST_RESULT_SKIPPED;
}
diff --git a/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c b/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c
new file mode 100644
index 000000000..a17a54e1a
--- /dev/null
+++ b/tftf/tests/runtime_services/trusted_os/tsp/test_pstate_after_exception.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2022, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <test_helpers.h>
+
+/*
+ * Test that the PSTATE bits not set by AArch64.TakeException, but set to a
+ * default when taking an exception to EL3, are maintained across an
+ * exception, and that changes in the TSP do not affect the PSTATE in TFTF
+ * and vice versa.
+ */
+test_result_t tsp_check_pstate_maintained_on_exception(void)
+{
+ smc_args tsp_svc_params;
+ smc_ret_values ret;
+ u_register_t dit;
+ u_register_t dit_bit;
+
+ SKIP_TEST_IF_TSP_NOT_PRESENT();
+ SKIP_TEST_IF_DIT_NOT_SUPPORTED();
+
+#ifdef __aarch64__
+ dit_bit = DIT_BIT;
+#else
+ dit_bit = CPSR_DIT_BIT;
+#endif
+
+ write_dit(dit_bit);
+
+ /* Standard SMC */
+ tsp_svc_params.fid = TSP_STD_FID(TSP_CHECK_DIT);
+ tsp_svc_params.arg1 = 0;
+ tsp_svc_params.arg2 = 0;
+ ret = tftf_smc(&tsp_svc_params);
+ if (ret.ret1 == 0) {
+ if (ret.ret2 == 0xffff) {
+			tftf_testcase_printf("DIT bit not supported by TSP\n");
+ return TEST_RESULT_SKIPPED;
+ }
+ tftf_testcase_printf("DIT bit in the TSP is not 0.\n");
+ return TEST_RESULT_FAIL;
+ }
+
+ dit = read_dit();
+ if (dit != dit_bit) {
+ tftf_testcase_printf("DIT bit in TFTF was not maintained.\n"
+ "Expected: 0x%x, Actual: 0x%x",
+ (uint32_t) dit_bit, (uint32_t) dit);
+ return TEST_RESULT_FAIL;
+ }
+
+ tsp_svc_params.fid = TSP_STD_FID(TSP_CHECK_DIT);
+ tsp_svc_params.arg1 = dit_bit;
+ tsp_svc_params.arg2 = 0;
+ ret = tftf_smc(&tsp_svc_params);
+ if (ret.ret1 == 0) {
+ tftf_testcase_printf("DIT bit in the TSP was not maintained\n"
+ "Expected: 0x%x, Actual: 0x%x",
+ (uint32_t) dit_bit, (uint32_t) ret.ret2);
+ return TEST_RESULT_FAIL;
+ }
+
+ dit = read_dit();
+ if (dit != dit_bit) {
+ tftf_testcase_printf("DIT bit in TFTF was not maintained.\n"
+ "Expected: 0x%x, Actual: 0x%x",
+ (uint32_t) dit_bit, (uint32_t) dit);
+ return TEST_RESULT_FAIL;
+ }
+
+ return TEST_RESULT_SUCCESS;
+}
diff --git a/tftf/tests/tbb-tests/tbb_test_infra.c b/tftf/tests/tbb-tests/tbb_test_infra.c
new file mode 100644
index 000000000..dc8ae385d
--- /dev/null
+++ b/tftf/tests/tbb-tests/tbb_test_infra.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "tbb_test_infra.h"
+
+#include <fwu_nvm.h>
+#include <io_storage.h>
+#include <platform.h>
+#include <status.h>
+#include <test_helpers.h>
+#include <tftf_lib.h>
+
+test_result_t test_corrupt_boot_fip(unsigned int offset)
+{
+ unsigned int flag = 0xDEADBEEF;
+ size_t written = 0;
+ uintptr_t dev_handle;
+ int result;
+
+ if (tftf_is_rebooted()) {
+ /* FIP successfully repaired */
+ return TEST_RESULT_SUCCESS;
+ }
+
+ /* Corrupt the FIP at the provided offset */
+ plat_get_nvm_handle(&dev_handle);
+ result = io_seek(dev_handle, IO_SEEK_SET, offset);
+ TEST_ASSERT(result == IO_SUCCESS);
+ result = io_write(dev_handle, (uintptr_t) &flag, sizeof(flag), &written);
+ TEST_ASSERT(result == IO_SUCCESS);
+ TEST_ASSERT(written == sizeof(flag));
+
+ /*
+ * Now reboot the system.
+ * On the next boot, EL3 firmware should notice and repair the corruption
+ * before re-entering TFTF
+ */
+
+ tftf_notify_reboot();
+ psci_system_reset();
+ return TEST_RESULT_FAIL;
+}
diff --git a/tftf/tests/tbb-tests/tbb_test_infra.h b/tftf/tests/tbb-tests/tbb_test_infra.h
new file mode 100644
index 000000000..e6bf0e598
--- /dev/null
+++ b/tftf/tests/tbb-tests/tbb_test_infra.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef TBB_TEST_INFRA_H_INCLUDED
+#define TBB_TEST_INFRA_H_INCLUDED
+
+#include <tftf_lib.h>
+
+test_result_t test_corrupt_boot_fip(unsigned int offset);
+
+#endif /* TBB_TEST_INFRA_H_INCLUDED */
+
diff --git a/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c b/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c
new file mode 100644
index 000000000..135efeea0
--- /dev/null
+++ b/tftf/tests/tbb-tests/test_tbb_corrupt_fip.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2024, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <uuid.h>
+
+#include "tbb_test_infra.h"
+
+#include <firmware_image_package.h>
+#include <platform.h>
+#include <tftf_lib.h>
+#include <uuid_utils.h>
+
+/*
+ * Return the offset relative to the base of the FIP of
+ * the image described by the uuid. 0 is returned on failure.
+ * The first image will not have an offset of 0, as the header
+ * exists at offset 0.
+ */
+static unsigned int
+find_offset_in_fip(const uuid_t *uuid)
+{
+ fip_toc_entry_t *current_file =
+ (fip_toc_entry_t *) (PLAT_ARM_FIP_BASE + sizeof(fip_toc_header_t));
+
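+	/* Walk the FIP ToC entries until the null-UUID terminator entry */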
+ while (!is_uuid_null(&(current_file->uuid))) {
+ if (uuid_equal(&(current_file->uuid), uuid)) {
+ return current_file->offset_address;
+ }
+ current_file += 1;
+	}
+ return 0;
+}
+
+test_result_t test_tbb_tkey_cert_header(void)
+{
+ static const uuid_t tkey_cert_uuid = UUID_TRUSTED_KEY_CERT;
+ unsigned int image_offset = find_offset_in_fip(&tkey_cert_uuid);
+
+ TEST_ASSERT_SKIP(image_offset != 0);
+ return test_corrupt_boot_fip(image_offset);
+}
+
diff --git a/tftf/tests/tests-corrupt-fip.mk b/tftf/tests/tests-corrupt-fip.mk
new file mode 100644
index 000000000..22fa686ed
--- /dev/null
+++ b/tftf/tests/tests-corrupt-fip.mk
@@ -0,0 +1,15 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/tbb-tests/, \
+ test_tbb_corrupt_fip.c \
+ tbb_test_infra.c \
+)
+
+TESTS_SOURCES += plat/common/fwu_nvm_accessors.c \
+ plat/arm/common/arm_fwu_io_storage.c \
+ drivers/io/io_fip.c \
+ drivers/io/io_memmap.c
diff --git a/tftf/tests/tests-corrupt-fip.xml b/tftf/tests/tests-corrupt-fip.xml
new file mode 100644
index 000000000..6bfa4a48a
--- /dev/null
+++ b/tftf/tests/tests-corrupt-fip.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="tbb corrupt trusted key header" description="The FIP is corrupted before update">
+ <testcase name="tbb bad tkey cert header" function="test_tbb_tkey_cert_header" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-cpu-extensions.mk b/tftf/tests/tests-cpu-extensions.mk
index fedf7837d..b0af1a3af 100644
--- a/tftf/tests/tests-cpu-extensions.mk
+++ b/tftf/tests/tests-cpu-extensions.mk
@@ -1,18 +1,29 @@
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2024, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/afp/test_afp.c \
extensions/amu/test_amu.c \
+ extensions/brbe/test_brbe.c \
+ extensions/ecv/test_ecv.c \
+ extensions/fgt/test_fgt.c \
+ extensions/pmuv3/test_pmuv3.c \
+ extensions/mpam/test_mpam.c \
extensions/mte/test_mte.c \
- extensions/sve/sve_operations.S \
+ extensions/pauth/test_pauth.c \
+ extensions/sme/test_sme.c \
+ extensions/sme/test_sme2.c \
+ extensions/spe/test_spe.c \
extensions/sve/test_sve.c \
- extensions/fgt/test_fgt.c \
- extensions/ecv/test_ecv.c \
+ extensions/sys_reg_trace/test_sys_reg_trace.c \
+ extensions/trbe/test_trbe.c \
+ extensions/trf/test_trf.c \
+ extensions/wfxt/test_wfxt.c \
+ runtime_services/arm_arch_svc/smccc_arch_soc_id.c \
runtime_services/arm_arch_svc/smccc_arch_workaround_1.c \
runtime_services/arm_arch_svc/smccc_arch_workaround_2.c \
- runtime_services/arm_arch_svc/smccc_arch_soc_id.c \
- extensions/pauth/test_pauth.c \
+ runtime_services/arm_arch_svc/smccc_arch_workaround_3.c \
)
diff --git a/tftf/tests/tests-cpu-extensions.xml b/tftf/tests/tests-cpu-extensions.xml
index 08a65c7fc..3b9334468 100644
--- a/tftf/tests/tests-cpu-extensions.xml
+++ b/tftf/tests/tests-cpu-extensions.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2024, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -16,15 +16,30 @@
<testcase name="Use Pointer Authentication Instructions" function="test_pauth_instructions" />
<testcase name="Check for Pointer Authentication key leakage from EL3" function="test_pauth_leakage" />
<testcase name="Check for Pointer Authentication key leakage from TSP" function="test_pauth_leakage_tsp" />
+ <testcase name="Access MPAM registers" function="test_mpam_reg_access" />
<testcase name="Use MTE Instructions" function="test_mte_instructions" />
<testcase name="Check for MTE register leakage" function="test_mte_leakage" />
<testcase name="Use FGT Registers" function="test_fgt_enabled" />
<testcase name="Use ECV Registers" function="test_ecv_enabled" />
+ <testcase name="Use trace buffer control Registers" function="test_trbe_enabled" />
+ <testcase name="Use branch record buffer control registers" function="test_brbe_enabled" />
+ <testcase name="Use trace filter control Registers" function="test_trf_enabled" />
+ <testcase name="Use trace system Registers" function="test_sys_reg_trace_enabled" />
+ <testcase name="SME support" function="test_sme_support" />
+ <testcase name="SME2 support" function="test_sme2_support" />
+ <testcase name="SPE support" function="test_spe_support" />
+ <testcase name="AFP support" function="test_afp_support" />
+ <testcase name="Test wfit instruction" function="test_wfit_instruction" />
+ <testcase name="Test wfet instruction" function="test_wfet_instruction" />
+ <testcase name="PMUv3 cycle counter functional in NS" function="test_pmuv3_cycle_works_ns" />
+ <testcase name="PMUv3 event counter functional in NS" function="test_pmuv3_event_works_ns" />
+ <testcase name="PMUv3 SMC counter preservation" function="test_pmuv3_el3_preserves" />
</testsuite>
<testsuite name="ARM_ARCH_SVC" description="Arm Architecture Service tests">
<testcase name="SMCCC_ARCH_WORKAROUND_1 test" function="test_smccc_arch_workaround_1" />
<testcase name="SMCCC_ARCH_WORKAROUND_2 test" function="test_smccc_arch_workaround_2" />
+ <testcase name="SMCCC_ARCH_WORKAROUND_3 test" function="test_smccc_arch_workaround_3" />
<testcase name="SMCCC_ARCH_SOC_ID test" function="test_smccc_arch_soc_id" />
</testsuite>
diff --git a/tftf/tests/tests-ea-ffh.mk b/tftf/tests/tests-ea-ffh.mk
new file mode 100644
index 000000000..be0eb65c0
--- /dev/null
+++ b/tftf/tests/tests-ea-ffh.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_ea_ffh.c
diff --git a/tftf/tests/tests-ea-ffh.xml b/tftf/tests/tests-ea-ffh.xml
new file mode 100644
index 000000000..1d31b8cff
--- /dev/null
+++ b/tftf/tests/tests-ea-ffh.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Inject External aborts" description="Injected EA's gets handled in EL3">
+ <testcase name="Inject syncEA which gets handled in EL3" function="test_inject_syncEA" />
+ <testcase name="Inject Serror which gets handled in EL3" function="test_inject_serror" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-errata_abi.mk b/tftf/tests/tests-errata_abi.mk
new file mode 100644
index 000000000..410dc13fe
--- /dev/null
+++ b/tftf/tests/tests-errata_abi.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/runtime_services/standard_service/errata_abi/api_tests/test_errata_abi_functionality.c
diff --git a/tftf/tests/tests-errata_abi.xml b/tftf/tests/tests-errata_abi.xml
new file mode 100644
index 000000000..d1964757e
--- /dev/null
+++ b/tftf/tests/tests-errata_abi.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="EM-ABI" description="Errata ABI Feature Implementation">
+ <testcase name="Version" function="test_em_version" />
+ <testcase name="EM_cpu_features" function="test_errata_abi_features" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-extensive.mk b/tftf/tests/tests-extensive.mk
index d1c6c5e08..5e0f0068b 100644
--- a/tftf/tests/tests-extensive.mk
+++ b/tftf/tests/tests-extensive.mk
@@ -6,6 +6,8 @@
# Run all standard tests, plus the extensive ones.
include tftf/tests/tests-standard.mk
-TESTS_MAKEFILE += tftf/tests/tests-psci-extensive.mk
+
+TESTS_MAKEFILE += tftf/tests/tests-psci-extensive.mk \
+ tftf/tests/tests-timer-stress.mk
include ${TESTS_MAKEFILE}
diff --git a/tftf/tests/tests-extensive.xml b/tftf/tests/tests-extensive.xml
index 773c19e0f..e861b4866 100644
--- a/tftf/tests/tests-extensive.xml
+++ b/tftf/tests/tests-extensive.xml
@@ -10,36 +10,12 @@
<!-- External references to all individual tests files. -->
<!DOCTYPE testsuites [
<!ENTITY tests-psci-extensive SYSTEM "tests-psci-extensive.xml">
-
- <!ENTITY tests-tftf-validation SYSTEM "tests-tftf-validation.xml">
- <!ENTITY tests-boot-req SYSTEM "tests-boot-req.xml">
- <!ENTITY tests-psci SYSTEM "tests-psci.xml">
- <!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
- <!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
- <!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
- <!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
- <!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
- <!ENTITY tests-cpu-extensions SYSTEM "tests-cpu-extensions.xml">
- <!ENTITY tests-performance SYSTEM "tests-performance.xml">
- <!ENTITY tests-smc SYSTEM "tests-smc.xml">
- <!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
+ <!ENTITY tests-timer-stress SYSTEM "tests-timer-stress.xml">
]>
<testsuites>
&tests-psci-extensive;
-
- &tests-tftf-validation;
- &tests-boot-req;
- &tests-psci;
- &tests-sdei;
- &tests-rt-instr;
- &tests-tsp;
- &tests-el3-pstate;
- &tests-state-switch;
- &tests-cpu-extensions;
- &tests-performance;
- &tests-smc;
- &tests-pmu-leakage;
+ &tests-timer-stress;
</testsuites>
diff --git a/tftf/tests/tests-firmware-handoff.mk b/tftf/tests/tests-firmware-handoff.mk
new file mode 100644
index 000000000..515188afa
--- /dev/null
+++ b/tftf/tests/tests-firmware-handoff.mk
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${TRANSFER_LIST}, 1)
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ test_firmware_handoff.c \
+)
+
+endif
diff --git a/tftf/tests/tests-firmware-handoff.xml b/tftf/tests/tests-firmware-handoff.xml
new file mode 100644
index 000000000..4b4b2a468
--- /dev/null
+++ b/tftf/tests/tests-firmware-handoff.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Firmware Handoff" description="Validate transfer list managed by firmware handoff framework">
+ <testcase name="Validate transfer list header" function="test_handoff_header" />
+ <testcase name="Validate HW_CONFIG in transfer list" function="test_handoff_dtb_payload" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-hcx.mk b/tftf/tests/tests-hcx.mk
new file mode 100644
index 000000000..ba7cd78f0
--- /dev/null
+++ b/tftf/tests/tests-hcx.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2021, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/hcx/test_hcx.c \
+)
diff --git a/tftf/tests/tests-hcx.xml b/tftf/tests/tests-hcx.xml
new file mode 100644
index 000000000..5b7f947f3
--- /dev/null
+++ b/tftf/tests/tests-hcx.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="FEAT_HCX" description="Tests that HCRX_EL2 access has been granted by EL3.">
+ <testcase name="Test access to HCRX_EL2" function="test_feat_hcx_enabled" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-memory-access.mk b/tftf/tests/tests-memory-access.mk
new file mode 100644
index 000000000..13b22416f
--- /dev/null
+++ b/tftf/tests/tests-memory-access.mk
@@ -0,0 +1,30 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_invalid_access.c
+
+ifeq (${ARCH},aarch64)
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ )
+
+endif
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ test_ffa_memory_sharing.c \
+ test_ffa_setup_and_discovery.c \
+ spm_test_helpers.c \
+)
diff --git a/tftf/tests/tests-memory-access.xml b/tftf/tests/tests-memory-access.xml
new file mode 100644
index 000000000..4318cc920
--- /dev/null
+++ b/tftf/tests/tests-memory-access.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2024, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="FF-A Memory Sharing (RME)"
+ description="Test FF-A Memory Sharing ABIs" >
+ <testcase name="Lend Memory to Secure World"
+ function="test_mem_lend_sp" />
+ <testcase name="Lend memory, clear flag set"
+ function="test_mem_share_to_sp_clear_memory"/>
+ <testcase name="Share Memory with Secure World"
+ function="test_mem_share_sp" />
+ <testcase name="Donate Memory to Secure World"
+ function="test_mem_donate_sp"/>
+ <testcase name="Request Share Memory SP-to-SP"
+ function="test_req_mem_share_sp_to_sp" />
+ <testcase name="Request Lend Memory SP-to-SP"
+ function="test_req_mem_lend_sp_to_sp" />
+ <testcase name="Request Donate Memory SP-to-SP"
+ function="test_req_mem_donate_sp_to_sp" />
+ <testcase name="Request Share NS Memory (large PA) SP-to-SP"
+ function="test_req_ns_mem_share_sp_to_sp" />
+ <testcase name="Request Share Memory SP-to-VM"
+ function="test_req_mem_share_sp_to_vm" />
+ <testcase name="Request Lend Memory SP-to-VM"
+ function="test_req_mem_lend_sp_to_vm" />
+ <testcase name="Share forbidden memory with SP"
+ function="test_share_forbidden_ranges" />
+ <testcase name="Donate consecutively"
+ function="test_consecutive_donate" />
+ </testsuite>
+
+ <testsuite name="Invalid memory access" description="Invalid memory access">
+ <testcase name="Access EL3 memory from NS world"
+ function="el3_memory_cannot_be_accessed_in_ns" />
+ <testcase name="Access Secure memory from NS world"
+ function="s_memory_cannot_be_accessed_in_ns" />
+ </testsuite>
+
+ <testsuite name="Invalid memory access with RME extension"
+ description="Invalid memory access with RME extension">
+ <testcase name="Access Realm memory from NS world"
+ function="rl_memory_cannot_be_accessed_in_ns" />
+ <testcase name="Access Secure memory from Realm world"
+ function="s_memory_cannot_be_accessed_in_rl" />
+ <testcase name="Access Root memory from Realm world"
+ function="rt_memory_cannot_be_accessed_in_rl" />
+ <testcase name="Share memory to an SP from a Root region"
+ function="rt_memory_cannot_be_accessed_in_s" />
+ <testcase name="FF-A memory share fails if using realm memory"
+ function="test_ffa_mem_send_sp_realm_memory" />
+ <testcase name="FF-A memory share fail realm memory other constituent"
+ function="test_ffa_mem_lend_sp_realm_memory_separate_constituent" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-nop.mk b/tftf/tests/tests-nop.mk
new file mode 100644
index 000000000..0cad4828f
--- /dev/null
+++ b/tftf/tests/tests-nop.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ test_nop.c \
+ )
diff --git a/tftf/tests/tests-nop.xml b/tftf/tests/tests-nop.xml
new file mode 100644
index 000000000..021d0a656
--- /dev/null
+++ b/tftf/tests/tests-nop.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="NOP tests" description="Dummy tests">
+ <testcase name="Test which calls a NOP function"
+ function="test_nop" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-psci.xml b/tftf/tests/tests-psci.xml
index e2be55724..e9c612bd8 100644
--- a/tftf/tests/tests-psci.xml
+++ b/tftf/tests/tests-psci.xml
@@ -51,6 +51,18 @@
<testcase name="CPU suspend to standby at level 3" function="test_psci_suspend_standby_level3" />
</testsuite>
+ <testsuite name="PSCI CPU Suspend in OSI mode" description="Test PSCI CPU Suspend support in OSI mode">
+ <testcase name="CPU suspend to powerdown at level 0 in OSI mode" function="test_psci_suspend_powerdown_level0_osi" />
+ <testcase name="CPU suspend to powerdown at level 1 in OSI mode" function="test_psci_suspend_powerdown_level1_osi" />
+ <testcase name="CPU suspend to powerdown at level 2 in OSI mode" function="test_psci_suspend_powerdown_level2_osi" />
+ <testcase name="CPU suspend to powerdown at level 3 in OSI mode" function="test_psci_suspend_powerdown_level3_osi" />
+
+ <testcase name="CPU suspend to standby at level 0 in OSI mode" function="test_psci_suspend_standby_level0_osi" />
+ <testcase name="CPU suspend to standby at level 1 in OSI mode" function="test_psci_suspend_standby_level1_osi" />
+ <testcase name="CPU suspend to standby at level 2 in OSI mode" function="test_psci_suspend_standby_level2_osi" />
+ <testcase name="CPU suspend to standby at level 3 in OSI mode" function="test_psci_suspend_standby_level3_osi" />
+ </testsuite>
+
<testsuite name="PSCI STAT" description="Test PSCI STAT support Core level">
<testcase name="for valid composite state CPU suspend" function="test_psci_stat_all_power_states" />
<testcase name="Stats test cases for CPU OFF" function="test_psci_stats_cpu_off" />
diff --git a/tftf/tests/tests-quark.mk b/tftf/tests/tests-quark.mk
deleted file mode 100644
index 0504936b0..000000000
--- a/tftf/tests/tests-quark.mk
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-TESTS_SOURCES += \
- tftf/tests/runtime_services/secure_service/spci_helpers.c \
- tftf/tests/runtime_services/secure_service/test_quark_request.c
diff --git a/tftf/tests/tests-quark.xml b/tftf/tests/tests-quark.xml
deleted file mode 100644
index 109fa229f..000000000
--- a/tftf/tests/tests-quark.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-
-<!--
- Copyright (c) 2019, Arm Limited. All rights reserved.
-
- SPDX-License-Identifier: BSD-3-Clause
--->
-
-<testsuites>
-
- <testsuite name="Secure Partition Quark tests"
- description="Test related to the Secure Partition Quark">
-
- <testcase name="Send simple request to Quark"
- function="test_quark_request" />
-
- </testsuite>
-
-</testsuites>
diff --git a/tftf/tests/tests-ras-ffh-nested.mk b/tftf/tests/tests-ras-ffh-nested.mk
new file mode 100644
index 000000000..1adcf8043
--- /dev/null
+++ b/tftf/tests/tests-ras-ffh-nested.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_ffh_nested.c \
+)
diff --git a/tftf/tests/tests-ras-ffh-nested.xml b/tftf/tests/tests-ras-ffh-nested.xml
new file mode 100644
index 000000000..8dfb6933b
--- /dev/null
+++ b/tftf/tests/tests-ras-ffh-nested.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS FFH nested" description="RAS errors handled in EL3 as nested exception on top of SMC call">
+ <testcase name="Inject RAS error which gets handled as nested exception during SMC exception" function="test_ras_ffh_nested" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-ras-kfh-reflect.mk b/tftf/tests/tests-ras-kfh-reflect.mk
new file mode 100644
index 000000000..bc8852fe0
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh-reflect.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_kfh_reflect.c \
+)
diff --git a/tftf/tests/tests-ras-kfh-reflect.xml b/tftf/tests/tests-ras-kfh-reflect.xml
new file mode 100644
index 000000000..4150200b1
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh-reflect.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS KFH Reflection" description="RAS errors reflected back from EL3">
+ <testcase name="Inject RAS error which gets reflected back during IRQ handling" function="test_ras_kfh_reflect_irq" />
+ <testcase name="Inject RAS error which gets reflected back during SMC call" function="test_ras_kfh_reflect_sync" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-ras-kfh.mk b/tftf/tests/tests-ras-kfh.mk
new file mode 100644
index 000000000..e79db93cb
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
+ inject_ras_error.S \
+ test_ras_kfh.c \
+)
diff --git a/tftf/tests/tests-ras-kfh.xml b/tftf/tests/tests-ras-kfh.xml
new file mode 100644
index 000000000..3cfed499e
--- /dev/null
+++ b/tftf/tests/tests-ras-kfh.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="RAS KFH" description="RAS errors handled through Kernel First handling">
+ <testcase name="Inject RAS error and wait for it being handled" function="test_ras_kfh" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-realm-payload.mk b/tftf/tests/tests-realm-payload.mk
new file mode 100644
index 000000000..a6d4d47a2
--- /dev/null
+++ b/tftf/tests/tests-realm-payload.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch64)
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/realm_payload/, \
+ host_realm_payload_multiple_rec_tests.c \
+ host_realm_payload_tests.c \
+ host_realm_spm.c \
+ host_realm_payload_simd_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_pmuv3.c \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ host_shared_data.c \
+ rmi_delegate_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ spm_test_helpers.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/heap/, \
+ page_alloc.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/extensions/fpu/, \
+ fpu.c \
+ )
+endif
diff --git a/tftf/tests/tests-realm-payload.xml b/tftf/tests/tests-realm-payload.xml
new file mode 100644
index 000000000..51cb09fc0
--- /dev/null
+++ b/tftf/tests/tests-realm-payload.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="Realm payload at EL1" description="Test Realm EL1 framework capabilities" >
+ <testcase name="Realm EL1 creation and execution test"
+ function="host_test_realm_create_enter" />
+ <testcase name="Multiple Realm EL1 creation and execution test"
+ function="host_test_multiple_realm_create_enter" />
+ <testcase name="Realm payload multi rec multiple cpu"
+ function="host_realm_multi_rec_multiple_cpu" />
+ <testcase name="Realm payload multi rec validations"
+ function="host_realm_multi_rec_multiple_cpu2" />
+ <testcase name="New Realm PAS Validation"
+ function="host_realm_pas_validation_new" />
+ <testcase name="Active Realm PAS validation"
+ function="host_realm_pas_validation_active" />
+ <testcase name="Realm SEA Empty"
+ function="host_realm_sea_empty" />
+ <testcase name="Realm SEA Unprotected"
+ function="host_realm_sea_unprotected" />
+ <testcase name="Realm SEA Adr Fault"
+ function="host_realm_sea_adr_fault" />
+ <testcase name="Realm Abort Unassigned RAM"
+ function="host_realm_abort_unassigned_ram" />
+ <testcase name="Realm Abort Unassigned Destroyed"
+ function="host_realm_abort_unassigned_destroyed" />
+ <testcase name="Realm Abort Assigned destroyed"
+ function="host_realm_abort_assigned_destroyed" />
+ <testcase name="Realm payload multi rec single cpu"
+ function="host_realm_multi_rec_single_cpu" />
+ <testcase name="Realm payload multi rec psci denied"
+ function="host_realm_multi_rec_psci_denied" />
+ <testcase name="Realm payload multi rec force exit on NS IRQ"
+ function="host_realm_multi_rec_exit_irq" />
+ <testcase name="Realm EL1 creation and RSI version"
+ function="host_test_realm_rsi_version" />
+ <testcase name="Realm payload boot"
+ function="host_realm_version_single_cpu" />
+ <testcase name="Realm payload multi CPU request"
+ function="host_realm_version_multi_cpu" />
+ <testcase name="Realm payload Delegate and Undelegate"
+ function="host_realm_delegate_undelegate" />
+ <testcase name="Multi CPU Realm payload Delegate and Undelegate"
+ function="host_realm_delundel_multi_cpu" />
+ <testcase name="Testing delegation fails"
+ function="host_realm_fail_del" />
+ <testcase name="PMUv3 cycle counter functional in Realm"
+ function="host_realm_pmuv3_cycle_works" />
+ <testcase name="PMUv3 event counter functional in Realm"
+ function="host_realm_pmuv3_event_works" />
+ <testcase name="PMUv3 RSI SMC counter preservation"
+ function="host_realm_pmuv3_rmm_preserves" />
+ <testcase name="PMUv3 overflow interrupt"
+ function="host_realm_pmuv3_overflow_interrupt" />
+ <testcase name="PMUv3 multiple rec validations"
+ function="host_realm_pmuv3_mul_rec" />
+ <testcase name="Test Secure interrupt can preempt Realm EL1"
+ function="host_realm_sec_interrupt_can_preempt_rl" />
+ <testcase name="Check that FPU state registers context is preserved in RL/SE/NS"
+ function="host_realm_fpu_access_in_rl_ns_se" />
+ <testcase name="Realm request set_ripas"
+ function="host_realm_set_ripas" />
+ <testcase name="Realm reject set_ripas"
+ function="host_realm_reject_set_ripas" />
+ <!-- Test case related to SVE support and SIMD state -->
+ <testcase name="Check RMI reports proper SVE VL"
+ function="host_check_rmi_reports_proper_sve_vl" />
+ <testcase name="Create SVE Realm with invalid VL"
+ function="host_sve_realm_test_invalid_vl" />
+ <testcase name="Create SVE Realm and test ID registers"
+ function="host_sve_realm_cmd_id_registers" />
+ <testcase name="Create non SVE Realm and test ID registers"
+ function="host_non_sve_realm_cmd_id_registers" />
+ <testcase name="Create SVE Realm and check rdvl result"
+ function="host_sve_realm_cmd_rdvl" />
+ <testcase name="Create SVE Realm and probe all supported VLs"
+ function="host_sve_realm_cmd_probe_vl" />
+ <testcase name="Check whether RMM preserves NS ZCR_EL2 register"
+ function="host_sve_realm_check_config_register" />
+ <testcase name="Intermittently switch to Realm while doing NS SVE ops"
+ function="host_sve_realm_check_vectors_operations" />
+ <testcase name="Check if RMM does not leak Realm SVE vector registers"
+ function="host_sve_realm_check_vectors_leaked" />
+ <testcase name="Check if Realm gets undefined abort if it access SVE"
+ function="host_non_sve_realm_check_undef_abort" />
+ <testcase name="Check various SIMD state preserved across NS/RL switch"
+ function="host_and_realm_check_simd" />
+ <!-- Test Realm for SME -->
+ <testcase name="Create Realm and test SME ID registers"
+ function="host_realm_check_sme_id_registers" />
+ <testcase name="Check if Realm gets undefined abort when it access SME"
+ function="host_realm_check_sme_undef_abort" />
+ <testcase name="Check whether RMM preserves NS SME configurations"
+ function="host_realm_check_sme_configs" />
+ <testcase name="Intermittently switch to Realm while NS doing SSVE ops"
+ function="host_sve_realm_check_streaming_vectors_operations" />
+ <!-- Test case related to PAuth -->
+ <testcase name="Check if PAuth keys are preserved in RL/SE/NS"
+ function="host_realm_enable_pauth" />
+ <testcase name="Generate PAuth Fault by overwriting LR"
+ function="host_realm_pauth_fault" />
+ <testcase name="Check if DIT Bit is preserved in RL/NS"
+ function="host_realm_enable_dit" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-rmi-spm.mk b/tftf/tests/tests-rmi-spm.mk
new file mode 100644
index 000000000..735e1911f
--- /dev/null
+++ b/tftf/tests/tests-rmi-spm.mk
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2021-2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+ifeq (${ARCH},aarch64)
+
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
+ rmi_spm_tests.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
+ ffa_helpers.c \
+ spm_common.c \
+ spm_test_helpers.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix lib/heap/, \
+ page_alloc.c \
+ )
+endif
diff --git a/tftf/tests/tests-rmi-spm.xml b/tftf/tests/tests-rmi-spm.xml
new file mode 100644
index 000000000..1d12b4a35
--- /dev/null
+++ b/tftf/tests/tests-rmi-spm.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2021-2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="RMI and SPM tests" description="Tests for SPM and RMI delegate/undelegate">
+ <testcase name="Test TFTF can call RMM/TRP and SPM serially on a single core"
+ function="test_spm_rmm_serial_smc" />
+ <testcase name="Test TFTF can call RMM/TRP and SPM parallelly on a multi core"
+ function="test_spm_rmm_parallel_smc" />
+ </testsuite>
+
+</testsuites>
\ No newline at end of file
diff --git a/tftf/tests/tests-rng_trap.mk b/tftf/tests/tests-rng_trap.mk
new file mode 100644
index 000000000..2457b0cbd
--- /dev/null
+++ b/tftf/tests/tests-rng_trap.mk
@@ -0,0 +1,9 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += $(addprefix tftf/tests/, \
+ extensions/rng_trap/test_rng_trap.c \
+)
diff --git a/tftf/tests/tests-rng_trap.xml b/tftf/tests/tests-rng_trap.xml
new file mode 100644
index 000000000..1f8cb9055
--- /dev/null
+++ b/tftf/tests/tests-rng_trap.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="RNG_TRAP" description="Tests that RNDR/RRS instructions trap to EL3 and returns a random number">
+ <testcase name="Test if RNDR instruction traps to el3 and gets a random number" function="test_rndr_rng_trap" />
+ <testcase name="Test if RNDRSS instruction traps to el3 and gets a random number" function="test_rndrrs_rng_trap" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-sdei.mk b/tftf/tests/tests-sdei.mk
index 0c495d99f..e73bfb72b 100644
--- a/tftf/tests/tests-sdei.mk
+++ b/tftf/tests/tests-sdei.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2021, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -10,4 +10,5 @@ TESTS_SOURCES += \
test_sdei.c \
test_sdei_state.c \
test_sdei_rm_any.c \
+ test_sdei_pstate.c \
)
diff --git a/tftf/tests/tests-sdei.xml b/tftf/tests/tests-sdei.xml
index 147835bc5..38c7c0d0e 100644
--- a/tftf/tests/tests-sdei.xml
+++ b/tftf/tests/tests-sdei.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2020, Arm Limited. All rights reserved.
+ Copyright (c) 2020-2021, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -15,6 +15,7 @@
<testcase name="SDEI event signaling: each core signals itself" function="test_sdei_event_signal_serial" />
<testcase name="SDEI event signaling: one core signals all others" function="test_sdei_event_signal_all" />
<testcase name="SDEI event routing all: SPI events routed to all CPUs" function="test_sdei_routing_any" />
+ <testcase name="SDEI event handler pstate testing" function="test_sdei_event_check_pstate" />
</testsuite>
</testsuites>
diff --git a/tftf/tests/tests-single-fault.mk b/tftf/tests/tests-single-fault.mk
index 064186516..86a23c380 100644
--- a/tftf/tests/tests-single-fault.mk
+++ b/tftf/tests/tests-single-fault.mk
@@ -1,10 +1,10 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
- inject_serror.S \
+ inject_ras_error.S \
test_single_fault.c \
)
diff --git a/tftf/tests/tests-smcfuzzing.mk b/tftf/tests/tests-smcfuzzing.mk
index 82b6a7c85..2834e4ec0 100644
--- a/tftf/tests/tests-smcfuzzing.mk
+++ b/tftf/tests/tests-smcfuzzing.mk
@@ -1,12 +1,50 @@
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+# Generate random fuzzing seeds
+# If no instance count is provided, default to 1 instance
+# If no seeds are provided, generate them randomly
+# The number of seeds provided must match the instance count
+SMC_FUZZ_INSTANCE_COUNT ?= 1
+SMC_FUZZ_SEEDS ?= $(shell python -c "from random import randint; seeds = [randint(0, 4294967295) for i in range($(SMC_FUZZ_INSTANCE_COUNT))];print(\",\".join(str(x) for x in seeds));")
+SMC_FUZZ_CALLS_PER_INSTANCE ?= 100
+
+# Validate SMC fuzzer parameters
+
+# Instance count must not be zero
+ifeq ($(SMC_FUZZ_INSTANCE_COUNT),0)
+$(error SMC_FUZZ_INSTANCE_COUNT must not be zero!)
+endif
+
+# Calls per instance must not be zero
+ifeq ($(SMC_FUZZ_CALLS_PER_INSTANCE),0)
+$(error SMC_FUZZ_CALLS_PER_INSTANCE must not be zero!)
+endif
+
+# Make sure seed count and instance count match
+TEST_SEED_COUNT = $(shell python -c "print(len(\"$(SMC_FUZZ_SEEDS)\".split(\",\")))")
+ifneq ($(TEST_SEED_COUNT), $(SMC_FUZZ_INSTANCE_COUNT))
+$(error Number of seeds does not match SMC_FUZZ_INSTANCE_COUNT!)
+endif
+
+# Add definitions to TFTF_DEFINES so they can be used in the code
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_SEEDS))
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_INSTANCE_COUNT))
+$(eval $(call add_define,TFTF_DEFINES,SMC_FUZZ_CALLS_PER_INSTANCE))
+ifeq ($(MULTI_CPU_SMC_FUZZER),1)
+$(eval $(call add_define,TFTF_DEFINES,MULTI_CPU_SMC_FUZZER))
+endif
+
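+# Illustrative invocation exercising these knobs (the platform and values
+# below are examples only; TESTS=smcfuzzing assumes this test set is selected
+# via its makefile name):
+#   make PLAT=fvp TESTS=smcfuzzing SMC_FUZZ_INSTANCE_COUNT=2 \
+#        SMC_FUZZ_SEEDS=12345,67890 SMC_FUZZ_CALLS_PER_INSTANCE=50
+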
TESTS_SOURCES += \
$(addprefix smc_fuzz/src/, \
randsmcmod.c \
smcmalloc.c \
fifo3d.c \
+ runtestfunction_helpers.c \
+ sdei_fuzz_helper.c \
+ tsp_fuzz_helper.c \
+ nfifo.c \
)
diff --git a/tftf/tests/tests-spm.mk b/tftf/tests/tests-spm.mk
index e62e03d94..97b3a491d 100644
--- a/tftf/tests/tests-spm.mk
+++ b/tftf/tests/tests-spm.mk
@@ -1,19 +1,46 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
+TFTF_INCLUDES += \
+ -Iinclude/runtime_services/host_realm_managment
+
TESTS_SOURCES += \
$(addprefix tftf/tests/runtime_services/secure_service/, \
+ ${ARCH}/ffa_arch_helpers.S \
ffa_helpers.c \
spm_common.c \
+ spm_test_helpers.c \
test_ffa_direct_messaging.c \
- test_ffa_features.c \
test_ffa_interrupts.c \
+ test_ffa_secure_interrupts.c \
test_ffa_memory_sharing.c \
- test_ffa_rxtx_map.c \
- test_ffa_version.c \
- test_spm_cpu_features.c \
+ test_ffa_setup_and_discovery.c \
+ test_ffa_notifications.c \
test_spm_smmu.c \
+ test_ffa_exceptions.c \
+ test_ffa_group0_interrupts.c \
+ )
+
+ifeq (${ARCH},aarch64)
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ test_spm_simd.c \
+ )
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/host_realm_managment/, \
+ host_realm_rmi.c \
+ host_realm_helper.c \
)
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/secure_service/, \
+ test_ffa_smccc.c \
+ test_ffa_smccc_asm.S \
+ )
+
+TESTS_SOURCES += lib/extensions/fpu/fpu.c
+endif
diff --git a/tftf/tests/tests-spm.xml b/tftf/tests/tests-spm.xml
index 32efc161b..09e0fd772 100644
--- a/tftf/tests/tests-spm.xml
+++ b/tftf/tests/tests-spm.xml
@@ -1,33 +1,61 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2023, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
<testsuites>
+ <testsuite name="FF-A Setup and Discovery"
+ description="Test FF-A Setup and Discovery interfaces" >
+ <testcase name="Test FFA_FEATURES"
+ function="test_ffa_features" />
- <testsuite name="FF-A Version"
- description="Test FF-A Version ABI" >
-
- <testcase name="Same FFA version as SPM"
- function="test_ffa_version_equal" />
- <testcase name="Setting bit 31 in input version"
- function="test_ffa_version_bit31"/>
- <testcase name="Bigger FFA version than SPM"
- function="test_ffa_version_bigger" />
- <testcase name="Smaller FFA version than SPM"
- function="test_ffa_version_smaller" />
-
- </testsuite>
+ <testcase name="Same FFA version as SPM"
+ function="test_ffa_version_equal" />
+ <testcase name="Setting bit 31 in input version"
+ function="test_ffa_version_bit31"/>
+ <testcase name="Bigger FFA version than SPM"
+ function="test_ffa_version_bigger" />
+ <testcase name="Smaller FFA version than SPM"
+ function="test_ffa_version_smaller" />
- <testsuite name="FF-A RXTX Mapping"
- description="Test to FF-A RXTX mapping ABI" >
<testcase name="FF-A RXTX Map API success"
function="test_ffa_rxtx_map_success" />
<testcase name="FF-A RXTX Map API consecutive"
function="test_ffa_rxtx_map_fail" />
+ <testcase name="FF-A RXTX Unmap API success"
+ function="test_ffa_rxtx_unmap_success" />
+ <testcase name="FF-A RXTX Unmap API consecutive"
+ function="test_ffa_rxtx_unmap_fail" />
+ <testcase name="FF-A RXTX remap unmapped region success"
+ function="test_ffa_rxtx_map_unmapped_success" />
+ <testcase name="FF-A RXTX unmap SP rxtx buffer"
+ function="test_ffa_rxtx_unmap_fail_if_sp" />
+ <testcase name="Test FFA_SPM_ID_GET"
+ function="test_ffa_spm_id_get" />
+
+ <testcase name="Test FFA_PARTITION_INFO_GET"
+ function="test_ffa_partition_info" />
+ <testcase name="Test FFA_PARTITION_INFO_GET v1.0"
+ function="test_ffa_partition_info_v1_0" />
+ </testsuite>
+
+ <testsuite name="FF-A SMCCC compliance"
+ description="SMCCC compliance" >
+ <testcase name="FF-A callee preserves GP register set per SMCCC"
+ function="test_smccc_callee_preserved" />
+ <testcase name="FF-A callee preserves extended GP register set per SMCCC"
+ function="test_smccc_ext_callee_preserved" />
+ </testsuite>
+
+ <testsuite name="SP exceptions"
+ description="SP exceptions" >
+
+ <testcase name="Access from a SP to a Realm region"
+ function="rl_memory_cannot_be_accessed_in_s" />
+
</testsuite>
<testsuite name="FF-A Direct messaging"
@@ -44,6 +72,14 @@
</testsuite>
+ <testsuite name="FF-A Group0 interrupts"
+ description="Test FF-A Group0 secure interrupt delegation to EL3" >
+ <testcase name="FF-A Group0 secure world"
+ function="test_ffa_group0_interrupt_sp_running" />
+ <testcase name="FF-A Group0 normal world"
+ function="test_ffa_group0_interrupt_in_nwd" />
+ </testsuite>
+
<testsuite name="FF-A Power management"
description="Test FF-A power management" >
<testcase name="FF-A SP hotplug"
@@ -52,8 +88,24 @@
<testsuite name="FF-A Memory Sharing"
description="Test FF-A Memory Sharing ABIs" >
+ <testcase name="Hypervisor share + memory retrieve request"
+ function="test_hypervisor_share_retrieve" />
+ <testcase name="Hypervisor lend + memory retrieve request"
+ function="test_hypervisor_lend_retrieve" />
+ <testcase name="Hypervisor donate + memory retrieve request"
+ function="test_hypervisor_donate_retrieve" />
+ <testcase name="Hypervisor share + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_share_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor lend + memory retrieve request (multiple receivers)"
+ function="test_hypervisor_lend_retrieve_multiple_receivers" />
+ <testcase name="Hypervisor share + memory retrieve request (fragmented)"
+ function="test_hypervisor_share_retrieve_fragmented" />
+ <testcase name="Hypervisor lend + memory retrieve request (fragmented)"
+ function="test_hypervisor_lend_retrieve_fragmented" />
<testcase name="Lend Memory to Secure World"
function="test_mem_lend_sp" />
+ <testcase name="Lend memory, clear flag set"
+ function="test_mem_share_to_sp_clear_memory"/>
<testcase name="Share Memory with Secure World"
function="test_mem_share_sp" />
<testcase name="Donate Memory to Secure World"
@@ -64,29 +116,108 @@
function="test_req_mem_lend_sp_to_sp" />
<testcase name="Request Donate Memory SP-to-SP"
function="test_req_mem_donate_sp_to_sp" />
- </testsuite>
-
- <testsuite name="FF-A features"
- description="Test FFA_FEATURES ABI" >
- <testcase name="Test FFA_FEATURES"
- function="test_ffa_features" />
+ <testcase name="Request Share NS Memory (large PA) SP-to-SP"
+ function="test_req_ns_mem_share_sp_to_sp" />
+ <testcase name="Request Share Memory SP-to-VM"
+ function="test_req_mem_share_sp_to_vm" />
+ <testcase name="Request Lend Memory SP-to-VM"
+ function="test_req_mem_lend_sp_to_vm" />
+ <testcase name="Share forbidden memory with SP"
+ function="test_share_forbidden_ranges" />
+ <testcase name="Donate consecutively"
+ function="test_consecutive_donate" />
</testsuite>
<testsuite name="SIMD,SVE Registers context"
description="Validate context switch between NWd and SWd" >
<testcase name="Check that SIMD registers context is preserved"
function="test_simd_vectors_preserved" />
+ <testcase name="Check that SVE registers context is preserved"
+ function="test_sve_vectors_preserved" />
+ <testcase name="Check that SVE operations in NWd are unaffected by SWd"
+ function="test_sve_vectors_operations" />
</testsuite>
<testsuite name="FF-A Interrupt"
description="Test non-secure Interrupts" >
- <testcase name="Test NS interrupts"
- function="test_ffa_ns_interrupt" />
+<!--
+ <testcase name="Test SPx with NS Interrupt queued"
+ function="test_ffa_ns_interrupt_queued" />
+-->
+ <testcase name="Test SPx with NS Interrupt signaled and SPy with Managed Exit"
+ function="test_ffa_SPx_signaled_SPy_ME" />
+ <testcase name="Test Managed Exit in SP call chain"
+ function="test_ffa_ns_interrupt_managed_exit_chained" />
+ <testcase name="Test SPx with Managed Exit and SPy with NS Interrupt signaled"
+ function="test_ffa_SPx_ME_SPy_signaled" />
+ <testcase name="Test Managed Exit"
+ function="test_ffa_ns_interrupt_managed_exit" />
+ <testcase name="Test NS interrupt Signalable"
+ function="test_ffa_ns_interrupt_signaled" />
+ <testcase name="Test Secure interrupt handling while SP running"
+ function="test_ffa_sec_interrupt_sp_running" />
+ <testcase name="Test Secure interrupt handling while SP waiting"
+ function="test_ffa_sec_interrupt_sp_waiting" />
+ <testcase name="Test Secure interrupt handling while SP blocked"
+ function="test_ffa_sec_interrupt_sp_blocked" />
+ <testcase name="Test Secure interrupt handling while SP1 waiting SP2 running"
+ function="test_ffa_sec_interrupt_sp1_waiting_sp2_running" />
+ <testcase name="Test ESPI Secure interrupt handling"
+ function="test_ffa_espi_sec_interrupt" />
</testsuite>
<testsuite name="SMMUv3 tests"
description="Initiate stage2 translation for streams from upstream peripherals" >
<testcase name="Check DMA command by SMMUv3TestEngine completes"
function="test_smmu_spm" />
+ <testcase name="Check secure peripheral access to a realm region is aborted"
+ function="test_smmu_spm_invalid_access" />
</testsuite>
+
+ <testsuite name="FF-A Notifications"
+ description="Test Notifications functionality" >
+ <testcase name="Notifications interrupts ID retrieval with FFA_FEATURES"
+ function= "test_notifications_retrieve_int_ids" />
+ <testcase name="Notifications bitmap create and destroy"
+ function="test_ffa_notifications_bitmap_create_destroy" />
+ <testcase name="Notifications bitmap destroy not created"
+ function="test_ffa_notifications_destroy_not_created" />
+ <testcase name="Notifications bitmap create after create"
+ function="test_ffa_notifications_create_after_create" />
+ <testcase name="SP Notifications bind and unbind"
+ function="test_ffa_notifications_sp_bind_unbind" />
+ <testcase name="VM Notifications bind and unbind"
+ function="test_ffa_notifications_vm_bind_unbind" />
+ <testcase name="VM Notifications bind NS Sender"
+ function="test_ffa_notifications_vm_bind_vm" />
+ <testcase name="Notifications bind/unbind of bound Notifications"
+ function="test_ffa_notifications_already_bound" />
+ <testcase name="Notifications bind/unbind SPs spoofing receiver"
+ function="test_ffa_notifications_bind_unbind_spoofing" />
+ <testcase name="Notifications zeroed in bind and unbind"
+ function="test_ffa_notifications_bind_unbind_zeroed" />
+ <testcase name="Notifications VM signals SP"
+ function="test_ffa_notifications_vm_signals_sp" />
+ <testcase name="Notifications SP signals SP"
+ function="test_ffa_notifications_sp_signals_sp" />
+ <testcase name="Notifications SP signals VM"
+ function="test_ffa_notifications_sp_signals_vm" />
+ <testcase name="Notifications SP signals SP with immediate SRI"
+ function="test_ffa_notifications_sp_signals_sp_immediate_sri" />
+ <testcase name="Notifications SP signals SP with delayed SRI"
+ function="test_ffa_notifications_sp_signals_sp_delayed_sri" />
+ <testcase name="Notifications unbind while pending"
+ function="test_ffa_notifications_unbind_pending" />
+ <testcase name="Notifications MP SP signals UP SP per-vCPU"
+ function="test_ffa_notifications_mp_sp_signals_up_sp" />
+ <testcase name="Notifications info get no data"
+ function="test_ffa_notifications_info_get_none" />
+ <testcase name="Notifications VM signals SP per-vCPU"
+ function="test_ffa_notifications_vm_signals_sp_per_vcpu" />
+ <testcase name="Notifications SP signals SP per-vCPU"
+ function="test_ffa_notifications_sp_signals_sp_per_vcpu" />
+ <testcase name="Notifications SP signals VM per-vCPU"
+ function="test_ffa_notifications_sp_signals_vm_per_vcpu" />
+ </testsuite>
+
</testsuites>
diff --git a/tftf/tests/tests-standard.mk b/tftf/tests/tests-standard.mk
index c6c9029f3..46157ce1e 100644
--- a/tftf/tests/tests-standard.mk
+++ b/tftf/tests/tests-standard.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -16,13 +16,21 @@ TESTS_MAKEFILE := $(addprefix tftf/tests/, \
tests-sdei.mk \
tests-single-fault.mk \
tests-smc.mk \
- tests-spm.mk \
tests-template.mk \
tests-tftf-validation.mk \
tests-trng.mk \
+ tests-errata_abi.mk \
tests-tsp.mk \
tests-uncontainable.mk \
- tests-debugfs.mk \
+ tests-debugfs.mk \
+)
+
+ifeq (${ARCH},aarch64)
+TESTS_MAKEFILE += $(addprefix tftf/tests/, \
+ tests-spm.mk \
+ tests-realm-payload.mk \
+ tests-rmi-spm.mk \
)
+endif
include ${TESTS_MAKEFILE}
diff --git a/tftf/tests/tests-standard.xml b/tftf/tests/tests-standard.xml
index 8c66cdaf0..d2c2639df 100644
--- a/tftf/tests/tests-standard.xml
+++ b/tftf/tests/tests-standard.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018-2021, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2023, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
@@ -15,6 +15,7 @@
<!ENTITY tests-sdei SYSTEM "tests-sdei.xml">
<!ENTITY tests-rt-instr SYSTEM "tests-runtime-instrumentation.xml">
<!ENTITY tests-trng SYSTEM "tests-trng.xml">
+ <!ENTITY tests-errata_abi SYSTEM "tests-errata_abi.xml">
<!ENTITY tests-tsp SYSTEM "tests-tsp.xml">
<!ENTITY tests-el3-pstate SYSTEM "tests-el3-power-state.xml">
<!ENTITY tests-state-switch SYSTEM "tests-arm-state-switch.xml">
@@ -24,6 +25,8 @@
<!ENTITY tests-spm SYSTEM "tests-spm.xml">
<!ENTITY tests-pmu-leakage SYSTEM "tests-pmu-leakage.xml">
<!ENTITY tests-debugfs SYSTEM "tests-debugfs.xml">
+ <!ENTITY tests-rmi-spm SYSTEM "tests-rmi-spm.xml">
+ <!ENTITY tests-realm-payload SYSTEM "tests-realm-payload.xml">
]>
<testsuites>
@@ -34,6 +37,7 @@
&tests-sdei;
&tests-rt-instr;
&tests-trng;
+ &tests-errata_abi;
&tests-tsp;
&tests-el3-pstate;
&tests-state-switch;
@@ -43,5 +47,7 @@
&tests-spm;
&tests-pmu-leakage;
&tests-debugfs;
+ &tests-rmi-spm;
+ &tests-realm-payload;
</testsuites>
diff --git a/tftf/tests/tests-tftf-validation.xml b/tftf/tests/tests-tftf-validation.xml
index 932b10e8b..e1e48d96c 100644
--- a/tftf/tests/tests-tftf-validation.xml
+++ b/tftf/tests/tests-tftf-validation.xml
@@ -20,7 +20,6 @@
<testcase name="Verify the timer interrupt generation" function="test_timer_framework_interrupt" />
<testcase name="Target timer to a power down cpu" function="test_timer_target_power_down_cpu" />
<testcase name="Test scenario where multiple CPUs call same timeout" function="test_timer_target_multiple_same_interval" />
- <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
</testsuite>
</testsuites>
diff --git a/tftf/tests/tests-timer-stress.mk b/tftf/tests/tests-timer-stress.mk
new file mode 100644
index 000000000..50ee7234b
--- /dev/null
+++ b/tftf/tests/tests-timer-stress.mk
@@ -0,0 +1,10 @@
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/framework_validation_tests/, \
+ test_timer_framework.c \
+ )
diff --git a/tftf/tests/tests-timer-stress.xml b/tftf/tests/tests-timer-stress.xml
new file mode 100644
index 000000000..e461e9070
--- /dev/null
+++ b/tftf/tests/tests-timer-stress.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+
+ <testsuite name="Stress tests" description="Validate all stress tests">
+ <testcase name="Stress test the timer framework" function="stress_test_timer_framework" />
+ </testsuite>
+
+</testsuites>
diff --git a/tftf/tests/tests-trng.mk b/tftf/tests/tests-trng.mk
index d2842964e..abeb5b544 100644
--- a/tftf/tests/tests-trng.mk
+++ b/tftf/tests/tests-trng.mk
@@ -1,7 +1,10 @@
#
-# Copyright (c) 2021, Arm Limited. All rights reserved.
+# Copyright (c) 2021-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
-TESTS_SOURCES += tftf/tests/runtime_services/standard_service/trng/api_tests/test_trng.c
+TESTS_SOURCES += \
+ $(addprefix tftf/tests/runtime_services/standard_service/, \
+ /trng/api_tests/test_trng.c \
+ )
diff --git a/tftf/tests/tests-tsp.mk b/tftf/tests/tests-tsp.mk
index b1d8b15c7..35ef02af9 100644
--- a/tftf/tests/tests-tsp.mk
+++ b/tftf/tests/tests-tsp.mk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -11,4 +11,5 @@ TESTS_SOURCES += \
test_smc_tsp_std_fn_call.c \
test_tsp_fast_smc.c \
test_normal_int_switch.c \
+ test_pstate_after_exception.c \
)
diff --git a/tftf/tests/tests-tsp.xml b/tftf/tests/tests-tsp.xml
index 7e1018ece..55dfbea85 100644
--- a/tftf/tests/tests-tsp.xml
+++ b/tftf/tests/tests-tsp.xml
@@ -1,13 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2018, Arm Limited. All rights reserved.
+ Copyright (c) 2018-2022, Arm Limited. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-->
<testsuites>
-
<testsuite name="IRQ support in TSP" description="Test the normal IRQ preemption support in TSP.">
<testcase name="TSP preempt by IRQ and resume" function="tsp_int_and_resume" />
<testcase name="Fast SMC while TSP preempted" function="test_fast_smc_when_tsp_preempted" />
@@ -32,4 +31,8 @@
<testcase name="Stress test TSP functionality" function="test_tsp_fast_smc_operations" />
</testsuite>
+ <testsuite name="TSP PSTATE test" description="Test PSTATE bits are maintained during exception">
+ <testcase name="Test PSTATE bits maintained on exception" function="tsp_check_pstate_maintained_on_exception" />
+ </testsuite>
+
</testsuites>
diff --git a/tftf/tests/tests-uncontainable.mk b/tftf/tests/tests-uncontainable.mk
index 7a4730025..873b43976 100644
--- a/tftf/tests/tests-uncontainable.mk
+++ b/tftf/tests/tests-uncontainable.mk
@@ -1,10 +1,10 @@
#
-# Copyright (c) 2018, Arm Limited. All rights reserved.
+# Copyright (c) 2023, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TESTS_SOURCES += $(addprefix tftf/tests/misc_tests/, \
- inject_serror.S \
+ inject_ras_error.S \
test_uncontainable.c \
)
diff --git a/tftf/tests/tests-undef-injection.mk b/tftf/tests/tests-undef-injection.mk
new file mode 100644
index 000000000..e13df173f
--- /dev/null
+++ b/tftf/tests/tests-undef-injection.mk
@@ -0,0 +1,7 @@
+#
+# Copyright (c) 2023, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+TESTS_SOURCES += tftf/tests/misc_tests/test_undef_injection.c
diff --git a/tftf/tests/tests-undef-injection.xml b/tftf/tests/tests-undef-injection.xml
new file mode 100644
index 000000000..0d43cdf02
--- /dev/null
+++ b/tftf/tests/tests-undef-injection.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2023, Arm Limited. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<testsuites>
+ <testsuite name="UNDEF Injection" description="UNDEF injection from EL3 to lower EL">
+ <testcase name="UNDEF Injection to lower EL"
+ function="test_undef_injection" />
+ </testsuite>
+</testsuites>
diff --git a/tftf/tests/tests-versal.mk b/tftf/tests/tests-versal.mk
new file mode 100644
index 000000000..6717ee53f
--- /dev/null
+++ b/tftf/tests/tests-versal.mk
@@ -0,0 +1,12 @@
+#
+# Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+TESTS_SOURCES += $(addprefix tftf/tests/plat/xilinx/common/, \
+ plat_pm.c \
+)
+
+
+include tftf/tests/tests-standard.mk
+TESTS_SOURCES += $(sort ${TESTS_SOURCES})
diff --git a/tftf/tests/tests-versal.xml b/tftf/tests/tests-versal.xml
new file mode 100644
index 000000000..6c8f51946
--- /dev/null
+++ b/tftf/tests/tests-versal.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+ Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved.
+
+ SPDX-License-Identifier: BSD-3-Clause
+-->
+
+<document>
+ <!-- External reference to standard tests files. -->
+ <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="tests-standard.xml" />
+ <testsuites>
+
+ <testsuite name="AMD-Xilinx tests" description="AMD-Xilinx common platform tests" >
+ <testcase name="Read PM API Version" function="test_pmapi_version" />
+ <testcase name="Get Platform Chip ID" function="test_get_chipid" />
+ </testsuite>
+
+ </testsuites>
+</document>
diff --git a/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c b/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
index 519ff16b2..c4ffbf9ac 100644
--- a/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
+++ b/tftf/tests/xlat_lib_v2/xlat_lib_v2_tests.c
@@ -163,6 +163,9 @@ static int add_region(unsigned long long base_pa, uintptr_t base_va,
{
int ret;
+ if (size == 0U) {
+ return -EPERM;
+ }
VERBOSE("mmap_add_dynamic_region(0x%llx, 0x%lx, 0x%zx, 0x%x)\n",
base_pa, base_va, size, attr);
diff --git a/tools/generate_dtb/generate_dtb.sh b/tools/generate_dtb/generate_dtb.sh
index 564c2a05d..1e0797b06 100755
--- a/tools/generate_dtb/generate_dtb.sh
+++ b/tools/generate_dtb/generate_dtb.sh
@@ -1,7 +1,7 @@
#!/bin/bash
#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
+# Copyright (c) 2018-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -12,13 +12,14 @@
# $1 = image_name (lowercase)
# $2 = path/to/file.dts
# $3 = build/$PLAT/$BUILD_TYPE/
+# $4 = path to store the dtb generated by this script
ORIGINAL_DTS=$2
MAPFILE="$3/$1/$1.map"
EXTRA_DTS="$3/$1/$1_extra.dts"
COMBINED_DTS="$3/$1/$1_combined.dts"
PREPROCESSED_DTS="$3/$1/$1_preprocessed.dts"
-GENERATED_DTB="$3/$1.dtb"
+GENERATED_DTB=$4
# Look for the start and end of the sections that are only known in the elf file
# after compiling the partition.
@@ -43,7 +44,6 @@ cat "$ORIGINAL_DTS" > "$COMBINED_DTS"
INCLUDES="-I spm/cactus
-I spm/ivy
- -I spm/quark
-I spm/include
-I include/lib"
diff --git a/tools/generate_json/generate_json.sh b/tools/generate_json/generate_json.sh
index f46cf158a..58677004d 100755
--- a/tools/generate_json/generate_json.sh
+++ b/tools/generate_json/generate_json.sh
@@ -1,36 +1,107 @@
#!/bin/bash
#
-# Copyright (c) 2020, Arm Limited. All rights reserved.
+# Copyright (c) 2020-2022, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Generate a JSON file which will be fed to TF-A as SPM_LAYOUT_FILE to package
# Secure Partitions as part of FIP.
+# Note: the script appends the given partitions to the existing layout file.
+# If you wish to generate a layout file containing only these partitions,
+# first run "make realclean" to remove the existing file.
-# $1 = Secure Partition (cactus)
-# $2 = Platform built path
-# Output = $2/sp_layout.json
+# $1 = Platform built path
+# $2.. = List of Secure Partitions
+# Output = $1/sp_layout.json
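+#
+# Illustrative invocation (the build path and partition list are examples
+# only): generate_json.sh build/fvp/debug cactus ivy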
-GENERATED_JSON=$2/sp_layout.json
+GENERATED_JSON=$1/sp_layout.json
+shift # Shift past $1 (the build path) so "$@" now holds only the list of partitions
+
+PARTITION_ALREADY_PRESENT=false
+
+CACTUS_PRESENT=false
+IVY_PRESENT=false
+IVY_SHIM_PRESENT=false
+
+for target in "$@"; do
+ case $target in
+ cactus) CACTUS_PRESENT=true ;;
+ ivy) IVY_PRESENT=true ;;
+ ivy_shim) IVY_SHIM_PRESENT=true ;;
+ *) echo "Invalid target $target"; exit 1 ;;
+ esac
+done
+
+echo -e "{" > "$GENERATED_JSON"
# To demonstrate communication between SP's, two cactus S-EL1 instances used.
# To also test mapping of the RXTX region a third cactus S-EL1 instance is used.
# cactus-primary, cactus-secondary and cactus-tertiary have same binary but
# different partition manifests.
-if [ "$1" == "cactus" ]; then
- echo -e "{\n\t\"$1-primary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1.dts\",\n \
- \t\"owner\": \"SiP\"\n\t},\n\n\t\"$1-secondary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1-secondary.dts\",\n \
- \t\"owner\": \"Plat\"\n\t},\n\n\t\"$1-tertiary\" : {\n \
- \t\"image\": \"$1.bin\",\n \
- \t\"pm\": \"$1-tertiary.dts\" \n \
- }\n}" \
- > "$GENERATED_JSON"
+if [ $CACTUS_PRESENT == "true" ]; then
+ cat >> "$GENERATED_JSON" << EOF
+"cactus-primary" : {
+ "image": {
+ "file": "cactus.bin",
+ "offset":"0x2000"
+ },
+ "pm": {
+ "file": "cactus.dts",
+ "offset": "0x1000"
+ },
+ "physical-load-address": "0x7000000",
+ "owner": "SiP"
+},
+
+"cactus-secondary" : {
+ "image": "cactus.bin",
+ "pm": "cactus-secondary.dts",
+ "physical-load-address": "0x7100000",
+ "owner": "Plat"
+},
+
+"cactus-tertiary" : {
+ "image": "cactus.bin",
+ "pm": "cactus-tertiary.dts",
+ "physical-load-address": "0x7200000",
+ "owner": "Plat"
+EOF
+ PARTITION_ALREADY_PRESENT=true
+fi
+
+if [ $IVY_PRESENT == "true" ]; then
+ if [ $PARTITION_ALREADY_PRESENT == "true" ]; then
+ echo -ne "\t},\n\n" >> "$GENERATED_JSON"
+ fi
+
+ cat >> "$GENERATED_JSON" << EOF
+"ivy" : {
+ "image": "ivy.bin",
+ "pm": "ivy-sel0.dts",
+ "physical-load-address": "0x7600000",
+ "owner": "Plat"
+}
+EOF
+
+ PARTITION_ALREADY_PRESENT=true
+elif [ $IVY_SHIM_PRESENT == "true" ]; then
+ if [ $PARTITION_ALREADY_PRESENT == "true" ]; then
+ echo -ne "\t},\n\n" >> "$GENERATED_JSON"
+ fi
+cat >> "$GENERATED_JSON" << EOF
+"ivy" : {
+ "image": "ivy.bin",
+ "pm": "ivy-sel1.dts",
+ "physical-load-address": "0x7600000",
+ "owner": "Plat"
+}
+EOF
+
+ PARTITION_ALREADY_PRESENT=true
else
- echo -e "\nWarning: Only Cactus is supported as Secure Partition\n"
+ echo -ne "\t},\n" >> "$GENERATED_JSON"
fi
+
+echo -e "\n}" >> "$GENERATED_JSON"
diff --git a/tools/generate_test_list/generate_test_list.pl b/tools/generate_test_list/generate_test_list.pl
deleted file mode 100755
index 702a9837b..000000000
--- a/tools/generate_test_list/generate_test_list.pl
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env perl
-
-#
-# Copyright (c) 2018-2020, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-#
-# Arg0: Name of the C file to generate.
-# Arg1: Name of the header file to generate.
-# Arg2: XML file that contains the list of test suites.
-# Arg3: Text file listing the files to skip. Takes precedence over Arg2 file.
-#
-
-my $TESTLIST_SRC_FILENAME = $ARGV[0];
-my $TESTLIST_HDR_FILENAME = $ARGV[1];
-my $XML_TEST_FILENAME = $ARGV[2];
-my $SKIPPED_LIST_FILENAME = $ARGV[3];
-
-use strict;
-use warnings;
-use File::Temp;
-use XML::LibXML;
-
-# Create the source & header files
-open FILE_SRC, ">", $TESTLIST_SRC_FILENAME or die $!;
-open FILE_HDR, ">", $TESTLIST_HDR_FILENAME or die $!;
-
-#
-# Open the test list
-#
-my $doc;
-my $testsuite_elem;
-my $failure_elem;
-
-if (-e $XML_TEST_FILENAME) {
- my $parser = XML::LibXML->new(expand_entities => 1);
- $doc = $parser->parse_file($XML_TEST_FILENAME);
- $parser->process_xincludes($doc);
-} else {
- exit 1
-}
-
-# We assume if there is a root then it is a 'testsuites' element
-my $root = $doc->documentElement();
-my @all_testcases = $root->findnodes("//testcase");
-my @all_testsuites = $root->findnodes("//testsuite");
-
-
-# Check the validity of the XML file:
-# - A testsuite name must be unique.
-# - A testsuite name must not contain a '/' character.
-# - All test cases belonging to a given testsuite must have unique names.
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- if ($testsuite_name =~ /\//) {
- print "ERROR: $XML_TEST_FILENAME: Invalid test suite name '$testsuite_name'.\n";
- print "ERROR: $XML_TEST_FILENAME: Test suite names can't include a '/' character.\n";
- exit 1;
- }
- my @testsuites = $root->findnodes("//testsuite[\@name='$testsuite_name']");
- if (@testsuites != 1) {
- print "ERROR: $XML_TEST_FILENAME: Can't have 2 test suites named '$testsuite_name'.\n";
- exit 1;
- }
-
- my @testcases_of_testsuite = $testsuite->findnodes("testcase");
- for my $testcase (@testcases_of_testsuite) {
- my $testcase_name = $testcase->getAttribute('name');
- my @testcases = $testsuite->findnodes("testcase[\@name='$testcase_name']");
- if (@testcases != 1) {
- print "ERROR: $XML_TEST_FILENAME: Can't have 2 tests named '$testsuite_name/$testcase_name'.\n";
- exit 1;
- }
- }
-}
-
-#
-# Get the list of tests to skip.
-# For each test to skip, find it in the XML tree and remove its node.
-#
-if (($SKIPPED_LIST_FILENAME) && (open SKIPPED_FILE, "<", $SKIPPED_LIST_FILENAME)) {
- my @lines = <SKIPPED_FILE>;
- close $SKIPPED_LIST_FILENAME;
-
- # Remove the newlines from the end of each line.
- chomp @lines;
-
- my $line_no = 0;
- my $testsuite_name;
- my $testcase_name;
- my $index = 0;
-
- for my $line (@lines) {
- ++$line_no;
-
- # Skip empty lines.
- if ($line =~ /^ *$/) { next; }
- # Skip comments.
- if ($line =~ /^#/) { next; }
-
- ($testsuite_name, $testcase_name) = split('/', $line);
-
- my @testsuites = $root->findnodes("//testsuite[\@name=\"$testsuite_name\"]");
- if (!@testsuites) {
- print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test suite '$testsuite_name' doesn't exist or has already been deleted.\n";
- next;
- }
-
- if (!defined $testcase_name) {
- print "INFO: Testsuite '$testsuite_name' will be skipped.\n";
- $testsuites[0]->unbindNode();
- next;
- }
-
- my @testcases = $testsuites[0]->findnodes("testcase[\@name=\"$testcase_name\"]");
- if (!@testcases) {
- print "WARNING: $SKIPPED_LIST_FILENAME:$line_no: Test case '$testsuite_name/$testcase_name' doesn't exist or has already been deleted.\n";
- next;
- }
-
- print "INFO: Testcase '$testsuite_name/$testcase_name' will be skipped.\n";
- $testcases[0]->unbindNode();
- }
-}
-
-
-@all_testcases = $root->findnodes("//testcase");
-
-#
-# Generate the test function prototypes
-#
-my $testcase_count = 0;
-
-print FILE_SRC "#include \"tftf.h\"\n\n";
-
-for my $testcase (@all_testcases) {
- my $testcase_function = $testcase->getAttribute('function');
- $testcase_count++;
- print FILE_SRC "test_result_t $testcase_function(void);\n";
-}
-
-#
-# Generate the header file.
-#
-print FILE_HDR "#ifndef __TEST_LIST_H__\n";
-print FILE_HDR "#define __TEST_LIST_H__\n\n";
-print FILE_HDR "#define TESTCASE_RESULT_COUNT $testcase_count\n\n";
-print FILE_HDR "#endif\n";
-
-#
-# Generate the lists of testcases
-#
-my $testsuite_index = 0;
-my $testcase_index = 0;
-@all_testsuites = $root->findnodes("//testsuite");
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- my @testcases = $testsuite->findnodes("//testsuite[\@name='$testsuite_name']//testcase");
-
- print FILE_SRC "\nconst test_case_t testcases_${testsuite_index}[] = {\n";
-
- for my $testcase (@testcases) {
- my $testcase_name = $testcase->getAttribute('name');
- my $testcase_description = $testcase->getAttribute('description');
- my $testcase_function = $testcase->getAttribute('function');
-
- if (!defined($testcase_description)) { $testcase_description = ""; }
-
- print FILE_SRC " { $testcase_index, \"$testcase_name\", \"$testcase_description\", $testcase_function },\n";
-
- $testcase_index++;
- }
- print FILE_SRC " { 0, NULL, NULL, NULL }\n";
- print FILE_SRC "};\n\n";
- $testsuite_index++;
-}
-
-#
-# Generate the lists of testsuites
-#
-$testsuite_index = 0;
-print FILE_SRC "const test_suite_t testsuites[] = {\n";
-for my $testsuite (@all_testsuites) {
- my $testsuite_name = $testsuite->getAttribute('name');
- my $testsuite_description = $testsuite->getAttribute('description');
- print FILE_SRC " { \"$testsuite_name\", \"$testsuite_description\", testcases_${testsuite_index} },\n";
- $testsuite_index++;
-}
-print FILE_SRC " { NULL, NULL, NULL }\n";
-print FILE_SRC "};\n";
-
diff --git a/tools/generate_test_list/generate_test_list.py b/tools/generate_test_list/generate_test_list.py
new file mode 100755
index 000000000..1fbe8ce7e
--- /dev/null
+++ b/tools/generate_test_list/generate_test_list.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2023 Google LLC. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+"""Generates the same output as generate_test_list.pl, but using python.
+
+Takes an xml file describing a list of testsuites as well as a skip list file
+and outputs a src and header file that refers to those tests.
+"""
+
+# This script was linted and formatted using the following commands:
+# autoflake -ir --remove-all-unused-imports --expand-star-imports \
+# --remove-duplicate-keys --remove-unused-variables tools/generate_test_list/
+# isort tools/generate_test_list/
+# black tools/generate_test_list/ --line-length 100
+# flake8 tools/generate_test_list/ --max-line-length 100
+
+import argparse
+import os.path
+import urllib.parse
+import xml.etree.ElementInclude
+import xml.parsers.expat
+from dataclasses import dataclass
+from typing import Dict, List
+from xml.etree.ElementTree import Element, TreeBuilder
+
+TESTS_LIST_H_TPL_FILENAME = "tests_list.h.tpl"
+TESTCASE_COUNT_TEMPLATE = "{{testcase_count}}"
+
+TESTS_LIST_C_TPL_FILENAME = "tests_list.c.tpl"
+FUNCTION_PROTOTYPES_TEMPLATE = "{{function_prototypes}}"
+TESTCASE_LISTS_TEMPLATE = "{{testcase_lists}}"
+TESTSUITES_LIST_TEMPLATE = "{{testsuites_list}}"
+
+XINCLUDE_INCLUDE = "xi:include"
+
+MAX_EXPANSION_DEPTH = 5
+
+# Intermediate representation classes.
+
+
+@dataclass
+class TestCase:
+ """Class representing a single TFTF test case."""
+
+ name: str
+ function: str
+ description: str = ""
+
+
+@dataclass
+class TestSuite:
+ """Class representing a single TFTF test suite."""
+
+ name: str
+ description: str
+ testcases: List[TestCase]
+
+
+def find_element_with_name_or_return_none(iterable, name: str):
+ """Looks through iterable for an element whose 'name' field matches name."""
+ return next(filter(lambda x: x.name == name, iterable), None)
+
+
+def parse_testsuites_element_into_ir(root: Element) -> List[TestSuite]:
+ """Given the root of a parsed XML file, construct TestSuite objects."""
+ testsuite_xml_elements = root.findall(".//testsuite")
+
+ testsuites = []
+ # Parse into IR
+ for testsuite in testsuite_xml_elements:
+ testcases = []
+ for testcase in testsuite.findall("testcase"):
+ testcases += [
+ TestCase(
+ testcase.get("name"),
+ testcase.get("function"),
+ testcase.get("description", default=""),
+ )
+ ]
+ testsuites += [TestSuite(testsuite.get("name"), testsuite.get("description"), testcases)]
+
+ return testsuites
+
+
+# In order to keep this script standalone (meaning no libraries outside of the
+# standard library), we have to do our own assembling of the XML Elements. This
+# is necessary because python doesn't give us a nice way to support external
+# entity expansion. As such we have to use the low level expat parser and build
+# the tree using TreeBuilder.
+
+
+def parse_xml_no_xinclude_expansion(filename: str) -> Element:
+ """Parse filename into an ElementTree.Element, following external entities."""
+ xml_dir_root = os.path.dirname(filename)
+ with open(filename) as fobj:
+ xml_contents = fobj.read()
+
+ parser = xml.parsers.expat.ParserCreate()
+ parser.SetParamEntityParsing(xml.parsers.expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
+
+ global treebuilder
+ treebuilder = TreeBuilder()
+ global expansion_depth
+ expansion_depth = 0
+
+ def start_element_handler(name: str, attributes):
+ # ElementInclude.include requires that the XInclude namespace is expanded.
+ if name == "xi:include":
+ name = "{http://www.w3.org/2001/XInclude}include"
+ treebuilder.start(name, attributes)
+
+ def end_element_handler(name: str):
+ treebuilder.end(name)
+
+ def external_entity_ref_handler(context, base, systemId, publicId):
+ global expansion_depth
+
+ external_entity_parser = parser.ExternalEntityParserCreate(context, "utf-8")
+ assign_all_parser_callbacks(external_entity_parser)
+ with open(os.path.join(xml_dir_root, systemId)) as fobj:
+ sub_xml_contents = fobj.read()
+ expansion_depth += 1
+ if expansion_depth > MAX_EXPANSION_DEPTH:
+ raise ValueError("Max entity expansion depth reached")
+
+ external_entity_parser.Parse(sub_xml_contents, True)
+ expansion_depth -= 1
+ return 1
+
+ def assign_all_parser_callbacks(p):
+ p.StartElementHandler = start_element_handler
+ p.EndElementHandler = end_element_handler
+ p.ExternalEntityRefHandler = external_entity_ref_handler
+
+ assign_all_parser_callbacks(parser)
+ parser.Parse(xml_contents, True)
+
+ return treebuilder.close()
+
+
+# Older versions of python3 don't support ElementInclude.include's base_url
+# kwarg. This callable class works around this.
+# base_url allows XInclude paths relative to the toplevel XML file to be used.
+class ElementIncludeLoaderAdapter:
+ """Adapts between ElementInclude's loader interface and our XML parser."""
+
+ def __init__(self, base_url: str):
+ self.base_url = base_url
+
+ def __call__(self, href: str, parse: str):
+ if parse != "xml":
+ raise ValueError("'parse' must be 'xml'")
+
+ return parse_xml_no_xinclude_expansion(urllib.parse.urljoin(self.base_url, href))
+
+
+def parse_testsuites_from_file(filename: str) -> List[TestSuite]:
+ """Given an XML file, parse the contents into a List[TestSuite]."""
+ root = parse_xml_no_xinclude_expansion(filename)
+
+ base_url = os.path.abspath(filename)
+ loader = ElementIncludeLoaderAdapter(base_url)
+ xml.etree.ElementInclude.include(root, loader=loader)
+
+ if root.tag == "testsuites":
+ testsuites_xml_elements = [root]
+ elif root.tag == "document":
+ testsuites_xml_elements = root.findall("testsuites")
+ else:
+ raise ValueError(f"Unexpected root tag '{root.tag}' in {filename}")
+
+ testsuites = []
+
+ for testsuites_xml_element in testsuites_xml_elements:
+ testsuites += parse_testsuites_element_into_ir(testsuites_xml_element)
+
+ return testsuites
+
+
+def check_validity_of_names(testsuites: List[TestSuite]):
+ """Checks that all testsuite and testcase names are valid."""
+ testsuite_name_set = set()
+ for ts in testsuites:
+ if "/" in ts.name:
+ raise ValueError(f"ERROR: {args.xml_test_filename}: Invalid test suite name {ts.name}")
+
+ if ts.name in testsuite_name_set:
+ raise ValueError(
+ f"ERROR: {args.xml_test_filename}: Can't have 2 test suites named " f"{ts.name}"
+ )
+
+ testsuite_name_set.add(ts.name)
+
+ testcase_name_set = set()
+ for tc in ts.testcases:
+ if tc.name in testcase_name_set:
+ raise ValueError(
+ f"ERROR: {args.xml_test_filename}: Can't have 2 tests named " f"{tc.name}"
+ )
+
+ testcase_name_set.add(tc.name)
+
+
+def remove_skipped_tests(testsuites: List[TestSuite], skip_tests_filename: str):
+ """Remove skipped tests from testsuites based on skip_tests_filename."""
+ with open(skip_tests_filename) as skipped_file:
+ skipped_file_lines = skipped_file.readlines()
+ for i, l in enumerate(skipped_file_lines):
+ line = l.strip()
+
+ # Skip empty lines and comments
+ if not line or line[0] == "#":
+ continue
+
+ testsuite_name, sep, testcase_name = line.partition("/")
+
+ testsuite = find_element_with_name_or_return_none(testsuites, testsuite_name)
+
+ if not testsuite:
+ print(
+ f"WARNING: {skip_tests_filename}:{i + 1}: Test suite "
+ f"'{testsuite_name}' doesn't exist or has already been deleted."
+ )
+ continue
+
+ if not testcase_name:
+ print(f"INFO: Test suite '{testsuite_name}' will be skipped")
+ testsuites = list(filter(lambda x: x.name != testsuite_name, testsuites))
+ continue
+
+ testcase = find_element_with_name_or_return_none(testsuite.testcases, testcase_name)
+ if not testcase:
+ print(
+ f"WARNING: {skip_tests_filename}:{i + 1}: Test case "
+ f"'{testsuite_name}/{testcase_name} doesn't exist or has already "
+ "been deleted"
+ )
+ continue
+
+ print(f"INFO: Test case '{testsuite_name}/{testcase_name}' will be skipped.")
+ testsuite.testcases.remove(testcase)
+
+ return testsuites
+
+
+def generate_function_prototypes(testcases: List[TestCase]):
+ """Generates function prototypes for the provided list of testcases."""
+ return [f"test_result_t {t.function}(void);" for t in testcases]
+
+
+def generate_testcase_lists(testsuites: List[TestSuite]):
+ """Generates the lists that enumerate the individual testcases in each testsuite."""
+ testcase_lists_contents = []
+ testcase_index = 0
+ for i, testsuite in enumerate(testsuites):
+ testcase_lists_contents += [f"\nconst test_case_t testcases_{i}[] = {{"]
+ for testcase in testsuite.testcases:
+ testcase_lists_contents += [
+ f' {{ {testcase_index}, "{testcase.name}", '
+ f'"{testcase.description}", {testcase.function} }},'
+ ]
+ testcase_index += 1
+ testcase_lists_contents += [" { 0, NULL, NULL, NULL }"]
+ testcase_lists_contents += ["};\n"]
+
+ return testcase_lists_contents
+
+
+def generate_testsuite_lists(testsuites: List[TestSuite]):
+ """Generates the list of testsuites."""
+ testsuites_list_contents = []
+ testsuites_list_contents += ["const test_suite_t testsuites[] = {"]
+ for i, testsuite in enumerate(testsuites):
+ testsuites_list_contents += [
+ f' {{ "{testsuite.name}", "{testsuite.description}", testcases_{i} }},'
+ ]
+ testsuites_list_contents += [" { NULL, NULL, NULL }"]
+ testsuites_list_contents += ["};"]
+ return testsuites_list_contents
+
+
+def generate_file_from_template(
+ template_filename: str, output_filename: str, template: Dict[str, str]
+):
+ """Given a template file, generate an output file based on template dictionary."""
+ with open(template_filename) as template_fobj:
+ template_contents = template_fobj.read()
+
+ output_contents = template_contents
+ for to_find, to_replace in template.items():
+ output_contents = output_contents.replace(to_find, to_replace)
+
+ with open(output_filename, "w") as output_fobj:
+ output_fobj.write(output_contents)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "testlist_src_filename",
+ type=str,
+ help="Output source filename",
+ )
+ parser.add_argument(
+ "testlist_hdr_filename",
+ type=str,
+ help="Output header filename",
+ )
+ parser.add_argument("xml_test_filename", type=str, help="Input xml filename")
+ parser.add_argument(
+ "--plat-skip-file",
+ type=str,
+ help="Filename containing tests to skip for this platform",
+ dest="plat_skipped_list_filename",
+ required=False,
+ )
+ parser.add_argument(
+ "--arch-skip-file",
+ type=str,
+ help="Filename containing tests to skip for this architecture",
+ dest="arch_skipped_list_filename",
+ required=False,
+ )
+ args = parser.parse_args()
+
+ testsuites = parse_testsuites_from_file(args.xml_test_filename)
+
+ check_validity_of_names(testsuites)
+
+ if args.plat_skipped_list_filename:
+ testsuites = remove_skipped_tests(testsuites, args.plat_skipped_list_filename)
+
+ if args.arch_skipped_list_filename:
+ testsuites = remove_skipped_tests(testsuites, args.arch_skipped_list_filename)
+
+ # Flatten all testcases
+ combined_testcases = [tc for ts in testsuites for tc in ts.testcases]
+
+ # Generate header file
+ generate_file_from_template(
+ os.path.join(os.path.dirname(__file__), TESTS_LIST_H_TPL_FILENAME),
+ args.testlist_hdr_filename,
+ {TESTCASE_COUNT_TEMPLATE: str(len(combined_testcases))},
+ )
+
+ # Generate the source file
+ all_function_prototypes = generate_function_prototypes(combined_testcases)
+ testcase_lists_contents = generate_testcase_lists(testsuites)
+ testsuites_list_contents = generate_testsuite_lists(testsuites)
+
+ generate_file_from_template(
+ os.path.join(os.path.dirname(__file__), TESTS_LIST_C_TPL_FILENAME),
+ args.testlist_src_filename,
+ {
+ FUNCTION_PROTOTYPES_TEMPLATE: "\n".join(all_function_prototypes),
+ TESTCASE_LISTS_TEMPLATE: "\n".join(testcase_lists_contents),
+ TESTSUITES_LIST_TEMPLATE: "\n".join(testsuites_list_contents),
+ },
+ )
diff --git a/tools/generate_test_list/tests_list.c.tpl b/tools/generate_test_list/tests_list.c.tpl
new file mode 100644
index 000000000..115cb715a
--- /dev/null
+++ b/tools/generate_test_list/tests_list.c.tpl
@@ -0,0 +1,7 @@
+#include "tftf.h"
+
+{{function_prototypes}}
+
+{{testcase_lists}}
+
+{{testsuites_list}}
diff --git a/tools/generate_test_list/tests_list.h.tpl b/tools/generate_test_list/tests_list.h.tpl
new file mode 100644
index 000000000..963b6985f
--- /dev/null
+++ b/tools/generate_test_list/tests_list.h.tpl
@@ -0,0 +1,6 @@
+#ifndef __TEST_LIST_H__
+#define __TEST_LIST_H__
+
+#define TESTCASE_RESULT_COUNT {{testcase_count}}
+
+#endif // __TEST_LIST_H__